// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include "dp_rx.h"
#include "debug.h"
#include "hif.h"

const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .recv_cb = ath11k_htc_rx_completion_handler,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .recv_cb = ath11k_htc_rx_completion_handler,
        },

        /* CE3: host->target WMI (mac0) */
        {
                .flags = CE_ATTR_FLAGS,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
        },

        /* CE5: target->host pktlog */
        {
                .flags = CE_ATTR_FLAGS,
                .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
        },

        /* CE7: host->target WMI (mac1) */
        {
                .flags = CE_ATTR_FLAGS,
        },

        /* CE8: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
        },

        /* CE9: host->target WMI (mac2) */
        {
                .flags = CE_ATTR_FLAGS,
        },

        /* CE10: target->host HTT */
        {
                .flags = CE_ATTR_FLAGS,
                .recv_cb = ath11k_htc_rx_completion_handler,
        },

        /* CE11 */
        {
                .flags = CE_ATTR_FLAGS,
        },
};

const struct ce_attr ath11k_host_ce_config_qca6390[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .dest_nentries = 512,
                .recv_cb = ath11k_htc_rx_completion_handler,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .dest_nentries = 512,
                .recv_cb = ath11k_htc_rx_completion_handler,
        },

        /* CE3: host->target WMI (mac0) */
        {
                .flags = CE_ATTR_FLAGS,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = 2048,
        },

        /* CE5: target->host pktlog */
        {
                .flags = CE_ATTR_FLAGS,
                .dest_nentries = 512,
                .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
        },

        /* CE7: host->target WMI (mac1) */
        {
                .flags = CE_ATTR_FLAGS,
        },

        /* CE8: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
        },
};

static bool ath11k_ce_need_shadow_fix(int ce_id)
{
        /* only ce4 needs shadow workaround */
        if (ce_id == 4)
                return true;

        return false;
}

void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
{
        int i;

        if (!ab->hw_params.supports_shadow_regs)
                return;

        for (i = 0; i < ab->hw_params.ce_count; i++)
                if (ath11k_ce_need_shadow_fix(i))
                        ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
}

static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
                                         struct sk_buff *skb, dma_addr_t paddr)
{
        struct ath11k_base *ab = pipe->ab;
        struct ath11k_ce_ring *ring = pipe->dest_ring;
        struct hal_srng *srng;
        unsigned int write_index;
        unsigned int nentries_mask = ring->nentries_mask;
        u32 *desc;
        int ret;

        lockdep_assert_held(&ab->ce.ce_lock);

        write_index = ring->write_index;

        srng = &ab->hal.srng_list[ring->hal_ring_id];

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
                ret = -ENOSPC;
                goto exit;
        }

        desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
        if (!desc) {
                ret = -ENOSPC;
                goto exit;
        }

        ath11k_hal_ce_dst_set_desc(desc, paddr);

        ring->skb[write_index] = skb;
        write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
        ring->write_index = write_index;

        pipe->rx_buf_needed--;

        ret = 0;
exit:
        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return ret;
}

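/* Refill the destination ring of a pipe with receive buffers: allocate
 * skbs, DMA-map them and hand them to the HAL ring until the pipe's
 * rx_buf_needed count is satisfied. The CE lock is taken internally.
 */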
static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
        struct ath11k_base *ab = pipe->ab;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret = 0;

        if (!(pipe->dest_ring || pipe->status_ring))
                return 0;

        spin_lock_bh(&ab->ce.ce_lock);
        while (pipe->rx_buf_needed) {
                skb = dev_alloc_skb(pipe->buf_sz);
                if (!skb) {
                        ret = -ENOMEM;
                        goto exit;
                }

                WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

                paddr = dma_map_single(ab->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(ab->dev, paddr))) {
                        ath11k_warn(ab, "failed to dma map ce rx buf\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto exit;
                }

                ATH11K_SKB_RXCB(skb)->paddr = paddr;

                ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
                if (ret) {
                        ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
                        dma_unmap_single(ab->dev, paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        goto exit;
                }
        }

exit:
        spin_unlock_bh(&ab->ce.ce_lock);
        return ret;
}

static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
                                         struct sk_buff **skb, int *nbytes)
{
        struct ath11k_base *ab = pipe->ab;
        struct hal_srng *srng;
        unsigned int sw_index;
        unsigned int nentries_mask;
        u32 *desc;
        int ret = 0;

        spin_lock_bh(&ab->ce.ce_lock);

        sw_index = pipe->dest_ring->sw_index;
        nentries_mask = pipe->dest_ring->nentries_mask;

        srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
        if (!desc) {
                ret = -EIO;
                goto err;
        }

        *nbytes = ath11k_hal_ce_dst_status_get_length(desc);
        if (*nbytes == 0) {
                ret = -EIO;
                goto err;
        }

        *skb = pipe->dest_ring->skb[sw_index];
        pipe->dest_ring->skb[sw_index] = NULL;

        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        pipe->dest_ring->sw_index = sw_index;

        pipe->rx_buf_needed++;
err:
        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        spin_unlock_bh(&ab->ce.ce_lock);

        return ret;
}

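/* Drain all completed receive descriptors on a pipe, unmap and hand the
 * buffers to the pipe's recv_cb, then repost fresh rx buffers. If
 * reposting fails for any reason other than -ENOSPC, the replenish retry
 * timer is armed.
 */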
static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
        struct ath11k_base *ab = pipe->ab;
        struct sk_buff *skb;
        struct sk_buff_head list;
        unsigned int nbytes, max_nbytes;
        int ret;

        __skb_queue_head_init(&list);
        while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
                                 max_nbytes, DMA_FROM_DEVICE);

                if (unlikely(max_nbytes < nbytes)) {
                        ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        dev_kfree_skb_any(skb);
                        continue;
                }

                skb_put(skb, nbytes);
                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list))) {
                ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
                           pipe->pipe_num, skb->len);
                pipe->recv_cb(ab, skb);
        }

        ret = ath11k_ce_rx_post_pipe(pipe);
        if (ret && ret != -ENOSPC) {
                ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
                            pipe->pipe_num, ret);
                mod_timer(&ab->rx_replenish_retry,
                          jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
        }
}

static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
        struct ath11k_base *ab = pipe->ab;
        struct hal_srng *srng;
        unsigned int sw_index;
        unsigned int nentries_mask;
        struct sk_buff *skb;
        u32 *desc;

        spin_lock_bh(&ab->ce.ce_lock);

        sw_index = pipe->src_ring->sw_index;
        nentries_mask = pipe->src_ring->nentries_mask;

        srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        desc = ath11k_hal_srng_src_reap_next(ab, srng);
        if (!desc) {
                skb = ERR_PTR(-EIO);
                goto err_unlock;
        }

        skb = pipe->src_ring->skb[sw_index];

        pipe->src_ring->skb[sw_index] = NULL;

        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        pipe->src_ring->sw_index = sw_index;

err_unlock:
        spin_unlock_bh(&srng->lock);

        spin_unlock_bh(&ab->ce.ce_lock);

        return skb;
}

static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
{
        struct ath11k_base *ab = pipe->ab;
        struct sk_buff *skb;

        while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
                if (!skb)
                        continue;

                dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        }
}

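/* Fill in the MSI address and data for a CE srng so the ring can signal
 * its own MSI interrupt. If the MSI vector information cannot be obtained
 * from the bus layer, ring_params is left untouched.
 */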
static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
                                                 struct hal_srng_params *ring_params)
{
        u32 msi_data_start;
        u32 msi_data_count;
        u32 msi_irq_start;
        u32 addr_lo;
        u32 addr_hi;
        int ret;

        ret = ath11k_get_user_msi_vector(ab, "CE",
                                         &msi_data_count, &msi_data_start,
                                         &msi_irq_start);
        if (ret)
                return;

        ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

        ring_params->msi_addr = addr_lo;
        ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
        ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
        ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

static int ath11k_ce_init_ring(struct ath11k_base *ab,
                               struct ath11k_ce_ring *ce_ring,
                               int ce_id, enum hal_ring_type type)
{
        struct hal_srng_params params = { 0 };
        int ret;

        params.ring_base_paddr = ce_ring->base_addr_ce_space;
        params.ring_base_vaddr = ce_ring->base_addr_owner_space;
        params.num_entries = ce_ring->nentries;

        if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
                ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

        switch (type) {
        case HAL_CE_SRC:
                if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
                        params.intr_batch_cntr_thres_entries = 1;
                break;
        case HAL_CE_DST:
                params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
                if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
                        params.intr_timer_thres_us = 1024;
                        params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
                        params.low_threshold = ce_ring->nentries - 3;
                }
                break;
        case HAL_CE_DST_STATUS:
                if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
                        params.intr_batch_cntr_thres_entries = 1;
                        params.intr_timer_thres_us = 0x1000;
                }
                break;
        default:
                ath11k_warn(ab, "Invalid CE ring type %d\n", type);
                return -EINVAL;
        }

        /* TODO: Init other params needed by HAL to init the ring */

        ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
        if (ret < 0) {
                ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
                            ret, ce_id);
                return ret;
        }

        ce_ring->hal_ring_id = ret;

        if (ab->hw_params.supports_shadow_regs &&
            ath11k_ce_need_shadow_fix(ce_id))
                ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
                                            ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
                                            ce_ring->hal_ring_id);

        return 0;
}

static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
        struct ath11k_ce_ring *ce_ring;
        dma_addr_t base_addr;

        ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
        if (ce_ring == NULL)
                return ERR_PTR(-ENOMEM);

        ce_ring->nentries = nentries;
        ce_ring->nentries_mask = nentries - 1;

        /* Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        ce_ring->base_addr_owner_space_unaligned =
                dma_alloc_coherent(ab->dev,
                                   nentries * desc_sz + CE_DESC_RING_ALIGN,
                                   &base_addr, GFP_KERNEL);
        if (!ce_ring->base_addr_owner_space_unaligned) {
                kfree(ce_ring);
                return ERR_PTR(-ENOMEM);
        }

        ce_ring->base_addr_ce_space_unaligned = base_addr;

        ce_ring->base_addr_owner_space = PTR_ALIGN(
                        ce_ring->base_addr_owner_space_unaligned,
                        CE_DESC_RING_ALIGN);
        ce_ring->base_addr_ce_space = ALIGN(
                        ce_ring->base_addr_ce_space_unaligned,
                        CE_DESC_RING_ALIGN);

        return ce_ring;
}

static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
        struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
        const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
        struct ath11k_ce_ring *ring;
        int nentries;
        int desc_sz;

        pipe->attr_flags = attr->flags;

        if (attr->src_nentries) {
                pipe->send_cb = ath11k_ce_send_done_cb;
                nentries = roundup_pow_of_two(attr->src_nentries);
                desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
                ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
                if (IS_ERR(ring))
                        return PTR_ERR(ring);
                pipe->src_ring = ring;
        }

        if (attr->dest_nentries) {
                pipe->recv_cb = attr->recv_cb;
                nentries = roundup_pow_of_two(attr->dest_nentries);
                desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
                ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
                if (IS_ERR(ring))
                        return PTR_ERR(ring);
                pipe->dest_ring = ring;

                desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
                ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
                if (IS_ERR(ring))
                        return PTR_ERR(ring);
                pipe->status_ring = ring;
        }

        return 0;
}

void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
        struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];

        if (pipe->send_cb)
                pipe->send_cb(pipe);

        if (pipe->recv_cb)
                ath11k_ce_recv_process_cb(pipe);
}

void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
        struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];

        if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
                pipe->send_cb(pipe);
}
EXPORT_SYMBOL(ath11k_ce_per_engine_service);

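/* Enqueue an skb for transmission on the given CE pipe. The skb is
 * expected to be DMA-mapped already, with its address stored in
 * ATH11K_SKB_CB(skb)->paddr; transfer_id is carried in the source
 * descriptor (typically the HTC endpoint id).
 */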
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
                   u16 transfer_id)
{
        struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
        struct hal_srng *srng;
        u32 *desc;
        unsigned int write_index, sw_index;
        unsigned int nentries_mask;
        int ret = 0;
        u8 byte_swap_data = 0;
        int num_used;

        /* Check if some entries could be regained by handling tx completion if
         * the CE has interrupts disabled and the used entries is more than the
         * defined usage threshold.
         */
        if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
                spin_lock_bh(&ab->ce.ce_lock);
                write_index = pipe->src_ring->write_index;

                sw_index = pipe->src_ring->sw_index;

                if (write_index >= sw_index)
                        num_used = write_index - sw_index;
                else
                        num_used = pipe->src_ring->nentries - sw_index +
                                   write_index;

                spin_unlock_bh(&ab->ce.ce_lock);

                if (num_used > ATH11K_CE_USAGE_THRESHOLD)
                        ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
        }

        if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
                return -ESHUTDOWN;

        spin_lock_bh(&ab->ce.ce_lock);

        write_index = pipe->src_ring->write_index;
        nentries_mask = pipe->src_ring->nentries_mask;

        srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
                ath11k_hal_srng_access_end(ab, srng);
                ret = -ENOBUFS;
                goto err_unlock;
        }

        desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
        if (!desc) {
                ath11k_hal_srng_access_end(ab, srng);
                ret = -ENOBUFS;
                goto err_unlock;
        }

        if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
                byte_swap_data = 1;

        ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
                                   skb->len, transfer_id, byte_swap_data);

        pipe->src_ring->skb[write_index] = skb;
        pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
                                                       write_index);

        ath11k_hal_srng_access_end(ab, srng);

        if (ath11k_ce_need_shadow_fix(pipe_id))
                ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);

        spin_unlock_bh(&srng->lock);

        spin_unlock_bh(&ab->ce.ce_lock);

        return 0;

err_unlock:
        spin_unlock_bh(&srng->lock);

        spin_unlock_bh(&ab->ce.ce_lock);

        return ret;
}

static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
{
        struct ath11k_base *ab = pipe->ab;
        struct ath11k_ce_ring *ring = pipe->dest_ring;
        struct sk_buff *skb;
        int i;

        if (!(ring && pipe->buf_sz))
                return;

        for (i = 0; i < ring->nentries; i++) {
                skb = ring->skb[i];
                if (!skb)
                        continue;

                ring->skb[i] = NULL;
                dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
                                 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }
}

static void ath11k_ce_shadow_config(struct ath11k_base *ab)
{
        int i;

        for (i = 0; i < ab->hw_params.ce_count; i++) {
                if (ab->hw_params.host_ce_config[i].src_nentries)
                        ath11k_hal_srng_update_shadow_config(ab,
                                                             HAL_CE_SRC, i);

                if (ab->hw_params.host_ce_config[i].dest_nentries) {
                        ath11k_hal_srng_update_shadow_config(ab,
                                                             HAL_CE_DST, i);

                        ath11k_hal_srng_update_shadow_config(ab,
                                                             HAL_CE_DST_STATUS, i);
                }
        }
}

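/* Return the shadow register configuration that is passed to the firmware
 * via QMI. If shadow registers are supported but not configured yet, the
 * non-CE srngs and then the CE srngs are configured first.
 */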
void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
                                 u32 **shadow_cfg, u32 *shadow_cfg_len)
{
        if (!ab->hw_params.supports_shadow_regs)
                return;

        ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

        /* shadow is already configured */
        if (*shadow_cfg_len)
                return;

        /* shadow isn't configured yet, configure now.
         * non-CE srngs are configured firstly, then
         * all CE srngs.
         */
        ath11k_hal_srng_shadow_config(ab);
        ath11k_ce_shadow_config(ab);

        /* get the shadow configuration */
        ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
EXPORT_SYMBOL(ath11k_ce_get_shadow_config);

void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
        struct ath11k_ce_pipe *pipe;
        int pipe_num;

        ath11k_ce_stop_shadow_timers(ab);

        for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
                pipe = &ab->ce.ce_pipe[pipe_num];
                ath11k_ce_rx_pipe_cleanup(pipe);

                /* Cleanup any src CE's which have interrupts disabled */
                ath11k_ce_poll_send_completed(ab, pipe_num);

                /* NOTE: Should we also clean up tx buffer in all pipes? */
        }
}
EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);

void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
        struct ath11k_ce_pipe *pipe;
        int i;
        int ret;

        for (i = 0; i < ab->hw_params.ce_count; i++) {
                pipe = &ab->ce.ce_pipe[i];
                ret = ath11k_ce_rx_post_pipe(pipe);
                if (ret) {
                        if (ret == -ENOSPC)
                                continue;

                        ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
                                    i, ret);
                        mod_timer(&ab->rx_replenish_retry,
                                  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);

                        return;
                }
        }
}
EXPORT_SYMBOL(ath11k_ce_rx_post_buf);

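/* Timer callback armed when posting rx buffers fails; retries posting on
 * all pipes.
 */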
void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
        struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);

        ath11k_ce_rx_post_buf(ab);
}

int ath11k_ce_init_pipes(struct ath11k_base *ab)
{
        struct ath11k_ce_pipe *pipe;
        int i;
        int ret;

        ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
                                    &ab->qmi.ce_cfg.shadow_reg_v2_len);

        for (i = 0; i < ab->hw_params.ce_count; i++) {
                pipe = &ab->ce.ce_pipe[i];

                if (pipe->src_ring) {
                        ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
                                                  HAL_CE_SRC);
                        if (ret) {
                                ath11k_warn(ab, "failed to init src ring: %d\n",
                                            ret);
                                /* Should we clear any partial init */
                                return ret;
                        }

                        pipe->src_ring->write_index = 0;
                        pipe->src_ring->sw_index = 0;
                }

                if (pipe->dest_ring) {
                        ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
                                                  HAL_CE_DST);
                        if (ret) {
                                ath11k_warn(ab, "failed to init dest ring: %d\n",
                                            ret);
                                /* Should we clear any partial init */
                                return ret;
                        }

                        pipe->rx_buf_needed = pipe->dest_ring->nentries ?
                                              pipe->dest_ring->nentries - 2 : 0;

                        pipe->dest_ring->write_index = 0;
                        pipe->dest_ring->sw_index = 0;
                }

                if (pipe->status_ring) {
                        ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
                                                  HAL_CE_DST_STATUS);
                        if (ret) {
                                ath11k_warn(ab, "failed to init dest status ring: %d\n",
                                            ret);
                                /* Should we clear any partial init */
                                return ret;
                        }

                        pipe->status_ring->write_index = 0;
                        pipe->status_ring->sw_index = 0;
                }
        }

        return 0;
}

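/* Free the descriptor memory and ring state for every pipe; counterpart
 * of ath11k_ce_alloc_pipes(). Also stops any shadow register timer still
 * running for pipes that need the shadow workaround.
 */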
void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
        struct ath11k_ce_pipe *pipe;
        int desc_sz;
        int i;

        for (i = 0; i < ab->hw_params.ce_count; i++) {
                pipe = &ab->ce.ce_pipe[i];

                if (ath11k_ce_need_shadow_fix(i))
                        ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);

                if (pipe->src_ring) {
                        desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
                        dma_free_coherent(ab->dev,
                                          pipe->src_ring->nentries * desc_sz +
                                          CE_DESC_RING_ALIGN,
                                          pipe->src_ring->base_addr_owner_space,
                                          pipe->src_ring->base_addr_ce_space);
                        kfree(pipe->src_ring);
                        pipe->src_ring = NULL;
                }

                if (pipe->dest_ring) {
                        desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
                        dma_free_coherent(ab->dev,
                                          pipe->dest_ring->nentries * desc_sz +
                                          CE_DESC_RING_ALIGN,
                                          pipe->dest_ring->base_addr_owner_space,
                                          pipe->dest_ring->base_addr_ce_space);
                        kfree(pipe->dest_ring);
                        pipe->dest_ring = NULL;
                }

                if (pipe->status_ring) {
                        desc_sz =
                          ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
                        dma_free_coherent(ab->dev,
                                          pipe->status_ring->nentries * desc_sz +
                                          CE_DESC_RING_ALIGN,
                                          pipe->status_ring->base_addr_owner_space,
                                          pipe->status_ring->base_addr_ce_space);
                        kfree(pipe->status_ring);
                        pipe->status_ring = NULL;
                }
        }
}
EXPORT_SYMBOL(ath11k_ce_free_pipes);

int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
        struct ath11k_ce_pipe *pipe;
        int i;
        int ret;
        const struct ce_attr *attr;

        spin_lock_init(&ab->ce.ce_lock);

        for (i = 0; i < ab->hw_params.ce_count; i++) {
                attr = &ab->hw_params.host_ce_config[i];
                pipe = &ab->ce.ce_pipe[i];
                pipe->pipe_num = i;
                pipe->ab = ab;
                pipe->buf_sz = attr->src_sz_max;

                ret = ath11k_ce_alloc_pipe(ab, i);
                if (ret) {
                        /* Free any partial successful allocation */
                        ath11k_ce_free_pipes(ab);
                        return ret;
                }
        }

        return 0;
}
EXPORT_SYMBOL(ath11k_ce_alloc_pipes);

/* For Big Endian Host, Copy Engine byte_swap is enabled
 * When Copy Engine does byte_swap, need to byte swap again for the
 * Host to get/put buffer content in the correct byte order
 */
void ath11k_ce_byte_swap(void *mem, u32 len)
{
        int i;

        if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
                if (!mem)
                        return;

                for (i = 0; i < (len / 4); i++) {
                        *(u32 *)mem = swab32(*(u32 *)mem);
                        mem += 4;
                }
        }
}

int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
{
        if (ce_id >= ab->hw_params.ce_count)
                return -EINVAL;

        return ab->hw_params.host_ce_config[ce_id].flags;
}
EXPORT_SYMBOL(ath11k_ce_get_attr_flags);