// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},

	/* CE9: host->target WMI (mac2) */
	{
		.flags = CE_ATTR_FLAGS,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	{
		.flags = CE_ATTR_FLAGS,
	},
};
const struct ce_attr ath11k_host_ce_config_qca6390[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
	},
};
const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},
};
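
/* Each ce_attr table above is indexed by CE pipe id; the per-chip table in
 * use is reachable through ab->hw_params.host_ce_config. Pipes flagged with
 * CE_ATTR_DIS_INTR are serviced by polling (see
 * ath11k_ce_poll_send_completed()) rather than by MSI interrupts.
 */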
static bool ath11k_ce_need_shadow_fix(int ce_id)
{
	/* only ce4 needs shadow workaround */
	if (ce_id == 4)
		return true;

	return false;
}
void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.ce_count; i++)
		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
}
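
/* Post a single rx buffer to the destination SRNG of a pipe; the caller
 * must hold ab->ce.ce_lock (asserted via lockdep below).
 */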
static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	u32 *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath11k_hal_ce_dst_set_desc(desc, paddr);

	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}
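
/* Refill the destination ring of a pipe with freshly allocated and DMA
 * mapped rx buffers until pipe->rx_buf_needed is satisfied.
 */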
static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath11k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
		if (ret) {
			ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}
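
/* Reap one completed entry from the destination status ring and return the
 * corresponding rx skb along with the number of bytes received.
 */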
static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	u32 *desc;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = -EIO;
		goto err;
	}

	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
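
/* Drain completed rx entries of a pipe: unmap each buffer, deliver it
 * through recv_cb and finally try to re-post fresh rx buffers.
 */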
static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;
	unsigned int nbytes, max_nbytes;
	int ret;

	__skb_queue_head_init(&list);
	while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_CE, "rx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->recv_cb(ab, skb);
	}

	ret = ath11k_ce_rx_post_pipe(pipe);
	if (ret && ret != -ENOSPC) {
		ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
			    pipe->pipe_num, ret);
		mod_timer(&ab->rx_replenish_retry,
			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
	}
}
static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;
	u32 *desc;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];

	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}
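
/* Process tx completions of a pipe: unmap each buffer and either free it
 * (no send_cb registered or credit flow enabled) or hand it to send_cb.
 */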
static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
		if (!skb)
			continue;

		dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
				 DMA_TO_DEVICE);

		if ((!pipe->send_cb) || ab->hw_params.credit_flow) {
			dev_kfree_skb_any(skb);
			continue;
		}

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_CE, "tx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->send_cb(ab, skb);
	}
}
static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
						 struct hal_srng_params *ring_params)
{
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	u32 addr_lo;
	u32 addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "CE",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
	ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
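
/* Program the HAL SRNG backing a CE ring: base addresses, interrupt
 * thresholds (only when CE_ATTR_DIS_INTR is not set) and, on chips with
 * shadow register support, the CE4 shadow timer workaround.
 */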
static int ath11k_ce_init_ring(struct ath11k_base *ab,
			       struct ath11k_ce_ring *ce_ring,
			       int ce_id, enum hal_ring_type type)
{
	struct hal_srng_params params = { 0 };
	int ret;

	params.ring_base_paddr = ce_ring->base_addr_ce_space;
	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
	params.num_entries = ce_ring->nentries;

	if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
		ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

	switch (type) {
	case HAL_CE_SRC:
		if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
			params.intr_batch_cntr_thres_entries = 1;
		break;
	case HAL_CE_DST:
		params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_timer_thres_us = 1024;
			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
			params.low_threshold = ce_ring->nentries - 3;
		}
		break;
	case HAL_CE_DST_STATUS:
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_batch_cntr_thres_entries = 1;
			params.intr_timer_thres_us = 0x1000;
		}
		break;
	default:
		ath11k_warn(ab, "Invalid CE ring type %d\n", type);
		return -EINVAL;
	}

	/* TODO: Init other params needed by HAL to init the ring */

	ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ce_id);
		return ret;
	}

	ce_ring->hal_ring_id = ret;

	if (ab->hw_params.supports_shadow_regs &&
	    ath11k_ce_need_shadow_fix(ce_id))
		ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
					    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
					    ce_ring->hal_ring_id);

	return 0;
}
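
/* Allocate the descriptor area of a CE ring from coherent DMA memory and
 * align both the owner and CE base addresses to CE_DESC_RING_ALIGN.
 */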
static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
	struct ath11k_ce_ring *ce_ring;
	dma_addr_t base_addr;

	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
	if (!ce_ring)
		return ERR_PTR(-ENOMEM);

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ab->dev,
				   nentries * desc_sz + CE_DESC_RING_ALIGN,
				   &base_addr, GFP_KERNEL);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		kfree(ce_ring);
		return ERR_PTR(-ENOMEM);
	}

	ce_ring->base_addr_ce_space_unaligned = base_addr;

	ce_ring->base_addr_owner_space = PTR_ALIGN(
			ce_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	ce_ring->base_addr_ce_space = ALIGN(
			ce_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return ce_ring;
}
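
/* Allocate the rings of one CE pipe: a source ring when src_nentries is
 * set, and a destination plus destination-status ring pair when
 * dest_nentries is set.
 */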
static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
	struct ath11k_ce_ring *ring;
	int nentries;
	int desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = attr->send_cb;
		nentries = roundup_pow_of_two(attr->src_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->src_ring = ring;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = roundup_pow_of_two(attr->dest_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->dest_ring = ring;

		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->status_ring = ring;
	}

	return 0;
}
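
/* Per-CE service routine called from the interrupt path: handle tx
 * completions when the pipe has a source ring and rx completions when a
 * recv_cb is installed.
 */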
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];

	if (attr->src_nentries)
		ath11k_ce_tx_process_cb(pipe);

	if (pipe->recv_cb)
		ath11k_ce_recv_process_cb(pipe);
}
void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id];

	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
		ath11k_ce_tx_process_cb(pipe);
}
EXPORT_SYMBOL(ath11k_ce_per_engine_service);
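
/* Queue a transmit skb on a source pipe. The buffer is expected to be DMA
 * mapped already, with the mapping stored in ATH11K_SKB_CB(skb)->paddr;
 * completion is reported through the pipe's send_cb unless credit flow is
 * enabled.
 */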
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	u32 *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath11k_hal_srng_access_end(ab, srng);

	if (ath11k_ce_need_shadow_fix(pipe_id))
		ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return 0;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct sk_buff *skb;
	int i;

	if (!(ring && pipe->buf_sz))
		return;

	for (i = 0; i < ring->nentries; i++) {
		skb = ring->skb[i];
		if (!skb)
			continue;

		ring->skb[i] = NULL;
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
static void ath11k_ce_shadow_config(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ab->hw_params.host_ce_config[i].src_nentries)
			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_SRC, i);

		if (ab->hw_params.host_ce_config[i].dest_nentries) {
			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_DST, i);

			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_DST_STATUS, i);
		}
	}
}
void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
				 u32 **shadow_cfg, u32 *shadow_cfg_len)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

	/* shadow is already configured */
	if (*shadow_cfg_len)
		return;

	/* shadow isn't configured yet, configure now.
	 * non-CE srngs are configured firstly, then
	 * all CE srngs.
	 */
	ath11k_hal_srng_shadow_config(ab);
	ath11k_ce_shadow_config(ab);

	/* get the shadow configuration */
	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
EXPORT_SYMBOL(ath11k_ce_get_shadow_config);
void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int pipe_num;

	ath11k_ce_stop_shadow_timers(ab);

	for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
		pipe = &ab->ce.ce_pipe[pipe_num];
		ath11k_ce_rx_pipe_cleanup(pipe);

		/* Cleanup any src CE's which have interrupts disabled */
		ath11k_ce_poll_send_completed(ab, pipe_num);

		/* NOTE: Should we also clean up tx buffer in all pipes? */
	}
}
EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);
void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];
		ret = ath11k_ce_rx_post_pipe(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				continue;

			ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
				    i, ret);
			mod_timer(&ab->rx_replenish_retry,
				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);

			return;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_rx_post_buf);
void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);

	ath11k_ce_rx_post_buf(ab);
}
int ath11k_ce_init_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
						  HAL_CE_SRC);
			if (ret) {
				ath11k_warn(ab, "failed to init src ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
						  HAL_CE_DST);
			if (ret) {
				ath11k_warn(ab, "failed to init dest ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
					      pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
						  HAL_CE_DST_STATUS);
			if (ret) {
				ath11k_warn(ab, "failed to init dest status ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}
void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	struct ath11k_ce_ring *ce_ring;
	int desc_sz;
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);

		if (pipe->src_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
			ce_ring = pipe->src_ring;
			dma_free_coherent(ab->dev,
					  pipe->src_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->src_ring);
			pipe->src_ring = NULL;
		}

		if (pipe->dest_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
			ce_ring = pipe->dest_ring;
			dma_free_coherent(ab->dev,
					  pipe->dest_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->dest_ring);
			pipe->dest_ring = NULL;
		}

		if (pipe->status_ring) {
			desc_sz =
			  ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
			ce_ring = pipe->status_ring;
			dma_free_coherent(ab->dev,
					  pipe->status_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->status_ring);
			pipe->status_ring = NULL;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_free_pipes);
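
/* Typical bring-up order used by the bus layers (a rough sketch, not a
 * contract enforced here):
 *
 *	ath11k_ce_alloc_pipes(ab);	- allocate rings per host_ce_config
 *	ath11k_ce_init_pipes(ab);	- program the HAL SRNGs
 *	ath11k_ce_rx_post_buf(ab);	- prime the destination rings
 *	...
 *	ath11k_ce_cleanup_pipes(ab);	- on stop: drop posted rx buffers
 *	ath11k_ce_free_pipes(ab);	- release the rings
 */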
int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;
	const struct ce_attr *attr;

	spin_lock_init(&ab->ce.ce_lock);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		attr = &ab->hw_params.host_ce_config[i];
		pipe = &ab->ce.ce_pipe[i];
		pipe->buf_sz = attr->src_sz_max;

		ret = ath11k_ce_alloc_pipe(ab, i);
		if (ret) {
			/* Free any partial successful allocation */
			ath11k_ce_free_pipes(ab);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath11k_ce_alloc_pipes);
/* For Big Endian Host, Copy Engine byte_swap is enabled
 * When Copy Engine does byte_swap, need to byte swap again for the
 * Host to get/put buffer content in the correct byte order
 */
void ath11k_ce_byte_swap(void *mem, u32 len)
{
	int i;

	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		for (i = 0; i < (len / 4); i++) {
			*(u32 *)mem = swab32(*(u32 *)mem);
			mem += 4;
		}
	}
}
int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
{
	if (ce_id >= ab->hw_params.ce_count)
		return -EINVAL;

	return ab->hw_params.host_ce_config[ce_id].flags;
}
EXPORT_SYMBOL(ath11k_ce_get_attr_flags);