// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "core.h"
#include "debug.h"

#define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf

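/* Every buffer handed to the firmware is pre-filled with
 * ATH11K_DB_MAGIC_VALUE. If any u32 word of a returned buffer still holds
 * the magic value, the firmware did not fully overwrite it, and the buffer
 * is rejected with -EINVAL. Note that "size" arrives in bytes; the right
 * shift converts it to a count of u32 words.
 */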
int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
{
        u32 *temp;
        int idx;

        size = size >> 2;

        for (idx = 0, temp = buffer; idx < size; idx++, temp++) {
                if (*temp == ATH11K_DB_MAGIC_VALUE)
                        return -EINVAL;
        }

        return 0;
}

static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
                                           void *buffer, u32 size)
{
        /* memset32 function fills buffer payload with the ATH11K_DB_MAGIC_VALUE
         * and the variable size is expected to be the number of u32 values
         * to be stored, not the number of bytes.
         */
        size = size / sizeof(u32);

        memset32(buffer, ATH11K_DB_MAGIC_VALUE, size);
}

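/* Post one buffer to the refill SRNG: fill it with the magic pattern,
 * DMA-map it, track it in the ring's IDR (the allocated id becomes part
 * of the cookie written into the descriptor, together with the pdev
 * index), and hand the descriptor to hardware. The caller must hold
 * srng->lock, as the lockdep assertion below documents.
 */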
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
                                        struct ath11k_dbring *ring,
                                        struct ath11k_dbring_element *buff,
                                        enum wmi_direct_buffer_module id)
{
        struct ath11k_base *ab = ar->ab;
        struct hal_srng *srng;
        dma_addr_t paddr;
        void *ptr_aligned, *ptr_unaligned, *desc;
        int buf_id;
        u32 cookie;
        int ret;

        srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

        lockdep_assert_held(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        ptr_unaligned = buff->payload;
        ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
        ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz);
        paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
                               DMA_FROM_DEVICE);

        ret = dma_mapping_error(ab->dev, paddr);
        if (ret)
                goto err;

        spin_lock_bh(&ring->idr_lock);
        buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
        spin_unlock_bh(&ring->idr_lock);
        if (buf_id < 0) {
                ret = -ENOBUFS;
                goto err_dma_unmap;
        }

        desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
        if (!desc) {
                ret = -ENOENT;
                goto err_idr_remove;
        }

        buff->paddr = paddr;

        cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
                 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

        ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);

        ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);

        ath11k_hal_srng_access_end(ab, srng);

        return 0;

err_idr_remove:
        spin_lock_bh(&ring->idr_lock);
        idr_remove(&ring->bufs_idr, buf_id);
        spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
        dma_unmap_single(ab->dev, paddr, ring->buf_sz,
                         DMA_FROM_DEVICE);
err:
        ath11k_hal_srng_access_end(ab, srng);
        return ret;
}

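/* Allocate and post as many buffers as the refill SRNG currently has free
 * entries for, capped at bufs_max. Allocations use GFP_ATOMIC because
 * srng->lock is held across the loop. Returns the number of requested
 * entries that could not be filled, so 0 means complete success.
 */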
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
                                   struct ath11k_dbring *ring,
                                   enum wmi_direct_buffer_module id)
{
        struct ath11k_dbring_element *buff;
        struct hal_srng *srng;
        int num_remain, req_entries, num_free;
        u32 align, size;
        int ret;

        srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];

        spin_lock_bh(&srng->lock);

        num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
        req_entries = min(num_free, ring->bufs_max);
        num_remain = req_entries;
        align = ring->buf_align;
        size = ring->buf_sz + align - 1;

        while (num_remain > 0) {
                buff = kzalloc(sizeof(*buff), GFP_ATOMIC);
                if (!buff)
                        break;

                buff->payload = kzalloc(size, GFP_ATOMIC);
                if (!buff->payload) {
                        kfree(buff);
                        break;
                }

                ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
                if (ret) {
                        ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
                                    num_remain, req_entries);
                        kfree(buff->payload);
                        kfree(buff);
                        break;
                }
                num_remain--;
        }

        spin_unlock_bh(&srng->lock);

        return num_remain;
}

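/* Tell the firmware about the ring: the base, head and tail pointer
 * addresses are split into 32-bit halves for the WMI command, along with
 * the element count, buffer size and event moderation parameters.
 */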
int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
                                struct ath11k_dbring *ring,
                                enum wmi_direct_buffer_module id)
{
        struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0};
        int ret;

        if (id >= WMI_DIRECT_BUF_MAX)
                return -EINVAL;

        param.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
        param.module_id = id;
        param.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
        param.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
        param.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
        param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
        param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
        param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
        param.num_elems = ring->bufs_max;
        param.buf_size = ring->buf_sz;
        param.num_resp_per_event = ring->num_resp_per_event;
        param.event_timeout_ms = ring->event_timeout_ms;

        ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
        if (ret) {
                ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
                return ret;
        }

        return 0;
}

int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
                          u32 num_resp_per_event, u32 event_timeout_ms,
                          int (*handler)(struct ath11k *,
                                         struct ath11k_dbring_data *))
{
        if (WARN_ON(!ring))
                return -EINVAL;

        ring->num_resp_per_event = num_resp_per_event;
        ring->event_timeout_ms = event_timeout_ms;
        ring->handler = handler;

        return 0;
}

int ath11k_dbring_buf_setup(struct ath11k *ar,
                            struct ath11k_dbring *ring,
                            struct ath11k_dbring_cap *db_cap)
{
        struct ath11k_base *ab = ar->ab;
        struct hal_srng *srng;
        int ret;

        srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
        ring->bufs_max = ring->refill_srng.size /
                ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);

        ring->buf_sz = db_cap->min_buf_sz;
        ring->buf_align = db_cap->min_buf_align;
        ring->pdev_id = db_cap->pdev_id;
        ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
        ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);

        ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);

        return ret;
}

int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
                             int ring_num, int num_entries)
{
        int ret;

        ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
                                   ring_num, ar->pdev_idx, num_entries);
        if (ret < 0) {
                ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
                            ret, ring_num);
                goto err;
        }

        return 0;
err:
        ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
        return ret;
}

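/* A client brings a direct-buffer ring up by combining the helpers above.
 * The sketch below mirrors the order used by the spectral scan module
 * (see spectral.c); "ring" and "handler" here are placeholders for the
 * client's own objects, and the dbring API itself does not enforce this
 * order. ath11k_dbring_get_cap() is defined just below.
 *
 *      struct ath11k_dbring_cap db_cap = {};
 *      int ret;
 *
 *      ret = ath11k_dbring_get_cap(ar->ab, ar->pdev_idx,
 *                                  WMI_DIRECT_BUF_SPECTRAL, &db_cap);
 *      if (ret)
 *              return ret;
 *
 *      ret = ath11k_dbring_srng_setup(ar, ring, 0, db_cap.min_elem);
 *      ath11k_dbring_set_cfg(ar, ring, num_resp_per_event,
 *                            event_timeout_ms, handler);
 *      ret = ath11k_dbring_buf_setup(ar, ring, &db_cap);
 *      ret = ath11k_dbring_wmi_cfg_setup(ar, ring, WMI_DIRECT_BUF_SPECTRAL);
 */
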
int ath11k_dbring_get_cap(struct ath11k_base *ab,
                          u8 pdev_idx,
                          enum wmi_direct_buffer_module id,
                          struct ath11k_dbring_cap *db_cap)
{
        int i;

        if (!ab->num_db_cap || !ab->db_caps)
                return -ENOENT;

        if (id >= WMI_DIRECT_BUF_MAX)
                return -EINVAL;

        for (i = 0; i < ab->num_db_cap; i++) {
                if (pdev_idx == ab->db_caps[i].pdev_id &&
                    id == ab->db_caps[i].id) {
                        *db_cap = ab->db_caps[i];

                        return 0;
                }
        }

        return -ENOENT;
}

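/* WMI tells the host which buffers the firmware has consumed. For each
 * released entry, look the buffer up via the cookie's IDR id, unmap it,
 * pass the aligned payload to the registered handler, then clear and
 * repost the same buffer so the ring stays full.
 */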
int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
                                       struct ath11k_dbring_buf_release_event *ev)
{
        struct ath11k_dbring *ring;
        struct hal_srng *srng;
        struct ath11k *ar;
        struct ath11k_dbring_element *buff;
        struct ath11k_dbring_data handler_data;
        struct ath11k_buffer_addr desc;
        u8 *vaddr_unalign;
        u32 num_entry, num_buff_reaped;
        u8 pdev_idx, rbm, module_id;
        u32 cookie;
        int buf_id;
        int size;
        dma_addr_t paddr;
        int ret = 0;

        pdev_idx = ev->fixed.pdev_id;
        module_id = ev->fixed.module_id;

        if (pdev_idx >= ab->num_radios) {
                ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
                return -EINVAL;
        }

        if (ev->fixed.num_buf_release_entry !=
            ev->fixed.num_meta_data_entry) {
                ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
                            ev->fixed.num_buf_release_entry,
                            ev->fixed.num_meta_data_entry);
                return -EINVAL;
        }

        ar = ab->pdevs[pdev_idx].ar;

        rcu_read_lock();
        if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
                ret = -EINVAL;
                goto rcu_unlock;
        }

        switch (ev->fixed.module_id) {
        case WMI_DIRECT_BUF_SPECTRAL:
                ring = ath11k_spectral_get_dbring(ar);
                break;
        default:
                ring = NULL;
                ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
                            ev->fixed.module_id);
                break;
        }

        if (!ring) {
                ret = -EINVAL;
                goto rcu_unlock;
        }

        srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
        num_entry = ev->fixed.num_buf_release_entry;
        size = ring->buf_sz + ring->buf_align - 1;
        num_buff_reaped = 0;

        spin_lock_bh(&srng->lock);

        while (num_buff_reaped < num_entry) {
                desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
                desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
                handler_data.meta = ev->meta_data[num_buff_reaped];

                num_buff_reaped++;

                ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);

                buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

                spin_lock_bh(&ring->idr_lock);
                buff = idr_find(&ring->bufs_idr, buf_id);
                if (!buff) {
                        spin_unlock_bh(&ring->idr_lock);
                        continue;
                }
                idr_remove(&ring->bufs_idr, buf_id);
                spin_unlock_bh(&ring->idr_lock);

                dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
                                 DMA_FROM_DEVICE);

                ath11k_debugfs_add_dbring_entry(ar, module_id,
                                                ATH11K_DBG_DBR_EVENT_RX, srng);

                if (ring->handler) {
                        vaddr_unalign = buff->payload;
                        handler_data.data = PTR_ALIGN(vaddr_unalign,
                                                      ring->buf_align);
                        handler_data.data_sz = ring->buf_sz;

                        ring->handler(ar, &handler_data);
                }

                buff->paddr = 0;
                memset(buff->payload, 0, size);
                ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
        }

        spin_unlock_bh(&srng->lock);

rcu_unlock:
        rcu_read_unlock();

        return ret;
}

void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
        ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}

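/* Release everything still tracked in the IDR: unmap each element's DMA
 * mapping and free both the payload and its bookkeeping struct, then tear
 * down the IDR itself.
 */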
void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
        struct ath11k_dbring_element *buff;
        int buf_id;

        spin_lock_bh(&ring->idr_lock);
        idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
                idr_remove(&ring->bufs_idr, buf_id);
                dma_unmap_single(ar->ab->dev, buff->paddr,
                                 ring->buf_sz, DMA_FROM_DEVICE);
                kfree(buff->payload);
                kfree(buff);
        }

        idr_destroy(&ring->bufs_idr);
        spin_unlock_bh(&ring->idr_lock);
}