// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include "wmi.h"
#include "wil6210.h"
#include "txrx.h"
#include "pmc.h"

struct desc_alloc_info {
	dma_addr_t pa;
	void	  *va;
};
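
/* Overview, as implemented below: the host allocates a ring of DMA
 * descriptors plus one DMA-coherent data buffer per descriptor, hands the
 * ring base address to the firmware via WMI_PMC_CMDID (ALLOCATE/RELEASE
 * ops), and later exposes whatever the device has DMA-ed into those buffers
 * through the debugfs read/llseek handlers at the bottom of this file.
 * Each desc_alloc_info entry tracks one data buffer: its DMA address (pa)
 * and kernel virtual address (va).
 */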

static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
{
	return !!pmc->pring_va;
}

void wil_pmc_init(struct wil6210_priv *wil)
{
	memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
	mutex_init(&wil->pmc.lock);
}

/* Allocate the physical ring (p-ring) and the required
 * number of descriptors of required size.
 * Initialize the descriptors as required by pmc dma.
 * The descriptors' buffers dwords are initialized to hold
 * dword's serial number in the lsw and reserved value
 * PCM_DATA_INVALID_DW_VAL in the msw.
 */
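/* Because of this fill pattern, a dword that still reads back as
 * (PCM_DATA_INVALID_DW_VAL | j) after a capture was presumably never
 * overwritten by the device, and the per-dword serial number j makes it
 * easy to see how far into a descriptor buffer the device actually wrote.
 */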
void wil_pmc_alloc(struct wil6210_priv *wil,
		   int num_descriptors,
		   int descriptor_size)
{
	u32 i;
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
	struct wmi_pmc_cmd pmc_cmd = {0};
	int last_cmd_err = -ENOMEM;

	mutex_lock(&pmc->lock);

	if (wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "ERROR pmc is already allocated\n");
		goto no_release_err;
	}

	if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
		wil_err(wil,
			"Invalid params num_descriptors(%d), descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
		wil_err(wil,
			"num_descriptors(%d) exceeds max ring size %d\n",
			num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > INT_MAX / descriptor_size) {
		wil_err(wil,
			"Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	pmc->num_descriptors = num_descriptors;
	pmc->descriptor_size = descriptor_size;

	wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
		     num_descriptors, descriptor_size);

	/* allocate descriptors info list in pmc context */
	pmc->descriptors = kcalloc(num_descriptors,
				   sizeof(struct desc_alloc_info),
				   GFP_KERNEL);
	if (!pmc->descriptors) {
		wil_err(wil, "ERROR allocating pmc skb list\n");
		goto no_release_err;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
		     pmc->descriptors);

	/* Allocate pring buffer and descriptors.
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has limitation that all vrings addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
	 * if we are using more than 32 bit addresses switch to 32 bit
	 * allocation before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	pmc->pring_va = dma_alloc_coherent(dev,
			sizeof(struct vring_tx_desc) * num_descriptors,
			&pmc->pring_pa,
			GFP_KERNEL);

	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev,
					  DMA_BIT_MASK(wil->dma_addr_size));
126 "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
127 pmc
->pring_va
, &pmc
->pring_pa
,
128 sizeof(struct vring_tx_desc
),
130 sizeof(struct vring_tx_desc
) * num_descriptors
);
132 if (!pmc
->pring_va
) {
133 wil_err(wil
, "ERROR allocating pmc pring\n");
134 goto release_pmc_skb_list
;

	/* initially, all descriptors are SW owned
	 * For Tx, Rx, and PMC, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < num_descriptors; i++) {
		struct vring_tx_desc *_d = &pmc->pring_va[i];
		struct vring_tx_desc dd = {}, *d = &dd;
		int j = 0;

		pmc->descriptors[i].va = dma_alloc_coherent(dev,
			descriptor_size,
			&pmc->descriptors[i].pa,
			GFP_KERNEL);

		if (unlikely(!pmc->descriptors[i].va)) {
			wil_err(wil, "ERROR allocating pmc descriptor %d", i);
			goto release_pmc_skbs;
		}

		for (j = 0; j < descriptor_size / sizeof(u32); j++) {
			u32 *p = (u32 *)pmc->descriptors[i].va + j;
			*p = PCM_DATA_INVALID_DW_VAL | j;
		}

		/* configure dma descriptor */
		d->dma.addr.addr_low =
			cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
		d->dma.addr.addr_high =
			cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
		d->dma.status = 0; /* 0 = HW_OWNED */
		d->dma.length = cpu_to_le16(descriptor_size);
		d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;

		*_d = *d;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");

	pmc_cmd.op = WMI_PMC_ALLOCATE;
	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);

	wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
	pmc->last_cmd_status = wmi_send(wil,
					WMI_PMC_CMDID,
					vif->mid,
					&pmc_cmd,
					sizeof(pmc_cmd));
	if (pmc->last_cmd_status) {
		wil_err(wil,
			"WMI_PMC_CMD with ALLOCATE op failed with status %d",
			pmc->last_cmd_status);
		goto release_pmc_skbs;
	}

	mutex_unlock(&pmc->lock);

	return;

release_pmc_skbs:
	wil_err(wil, "exit on error: Releasing skbs...\n");
	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
		dma_free_coherent(dev,
				  descriptor_size,
				  pmc->descriptors[i].va,
				  pmc->descriptors[i].pa);

		pmc->descriptors[i].va = NULL;
	}
	wil_err(wil, "exit on error: Releasing pring...\n");

	dma_free_coherent(dev,
			  sizeof(struct vring_tx_desc) * num_descriptors,
			  pmc->pring_va,
			  pmc->pring_pa);

	pmc->pring_va = NULL;

release_pmc_skb_list:
	wil_err(wil, "exit on error: Releasing descriptors info list...\n");
	kfree(pmc->descriptors);
	pmc->descriptors = NULL;

no_release_err:
	pmc->last_cmd_status = last_cmd_err;
	mutex_unlock(&pmc->lock);
}
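
/* Memory layout produced by a successful wil_pmc_alloc(), per the code
 * above: one coherent p-ring of num_descriptors * sizeof(struct
 * vring_tx_desc) bytes, plus num_descriptors separate coherent buffers of
 * descriptor_size bytes each, i.e. num_descriptors * descriptor_size bytes
 * of capture space in total. For example (illustrative numbers only),
 * 256 descriptors of 2048 bytes give 512 KiB of capture space on top of
 * the ring itself.
 */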

/* Traverse the p-ring and release all buffers.
 * At the end release the p-ring memory
 */
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
	struct wmi_pmc_cmd pmc_cmd = {0};

	mutex_lock(&pmc->lock);

	pmc->last_cmd_status = 0;

	if (!wil_is_pmc_allocated(pmc)) {
		wil_dbg_misc(wil,
			     "pmc_free: Error, can't free - not allocated\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return;
	}

	if (send_pmc_cmd) {
		wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
		pmc_cmd.op = WMI_PMC_RELEASE;
		pmc->last_cmd_status =
			wmi_send(wil, WMI_PMC_CMDID, vif->mid,
				 &pmc_cmd, sizeof(pmc_cmd));
		if (pmc->last_cmd_status) {
			wil_err(wil,
				"WMI_PMC_CMD with RELEASE op failed, status %d",
				pmc->last_cmd_status);
			/* There's nothing we can do with this error.
			 * Normally, it should never occur.
			 * Continue to freeing all memory allocated for pmc.
			 */
		}
	}

	if (pmc->pring_va) {
		size_t buf_size = sizeof(struct vring_tx_desc) *
				  pmc->num_descriptors;

		wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
			     pmc->pring_va);
		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);

		pmc->pring_va = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	if (pmc->descriptors) {
		int i;

		for (i = 0;
		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
			dma_free_coherent(dev,
					  pmc->descriptor_size,
					  pmc->descriptors[i].va,
					  pmc->descriptors[i].pa);
			pmc->descriptors[i].va = NULL;
		}
		wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
			     pmc->num_descriptors);
		wil_dbg_misc(wil,
			     "pmc_free: free pmc descriptors info list %p\n",
			     pmc->descriptors);
		kfree(pmc->descriptors);
		pmc->descriptors = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	mutex_unlock(&pmc->lock);
}
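
/* Note on send_pmc_cmd: when it is zero the WMI_PMC_RELEASE command is
 * skipped and only host memory is freed; presumably this is for paths where
 * the firmware is not in a state to accept WMI commands (e.g. reset).
 */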

/* Status of the last operation requested via debugfs: alloc/free/read.
 * 0 - success or negative errno
 */
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
	wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
		     wil->pmc.last_cmd_status);

	return wil->pmc.last_cmd_status;
}

/* Read from required position up to the end of current descriptor,
 * depends on descriptor size configured during alloc request.
 */
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
		     loff_t *f_pos)
{
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t retval = 0;
	unsigned long long idx;
	loff_t offset;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	wil_dbg_misc(wil,
		     "pmc_read: size %u, pos %lld\n",
		     (u32)count, *f_pos);

	pmc->last_cmd_status = 0;

	idx = *f_pos;
	do_div(idx, pmc->descriptor_size);
	offset = *f_pos - (idx * pmc->descriptor_size);

	if (*f_pos >= pmc_size) {
		wil_dbg_misc(wil,
			     "pmc_read: reached end of pmc buf: %lld >= %u\n",
			     *f_pos, (u32)pmc_size);
		pmc->last_cmd_status = -ERANGE;
		goto out;
	}

	wil_dbg_misc(wil,
		     "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
		     *f_pos, idx, offset, count);

	/* if no errors, return the copied byte count */
	retval = simple_read_from_buffer(buf,
					 count,
					 &offset,
					 pmc->descriptors[idx].va,
					 pmc->descriptor_size);
	*f_pos += retval;

out:
	mutex_unlock(&pmc->lock);

	return retval;
}
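
/* Worked example of the position-to-descriptor mapping above (illustrative
 * numbers only): with descriptor_size == 2048 and *f_pos == 5000,
 * idx = 5000 / 2048 = 2 and offset = 5000 - 2 * 2048 = 904, so at most
 * 2048 - 904 bytes are copied from pmc->descriptors[2].va; crossing into
 * descriptor 3 requires a further read() call.
 */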

loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
{
	loff_t newpos;
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	switch (whence) {
	case 0: /* SEEK_SET */
		newpos = off;
		break;

	case 1: /* SEEK_CUR */
		newpos = filp->f_pos + off;
		break;

	case 2: /* SEEK_END */
		newpos = pmc_size;
		break;

	default: /* can't happen */
		newpos = -EINVAL;
		goto out;
	}

	if (newpos < 0) {
		newpos = -EINVAL;
		goto out;
	}
	if (newpos > pmc_size)
		newpos = pmc_size;

	filp->f_pos = newpos;

out:
	mutex_unlock(&pmc->lock);

	return newpos;
}

int wil_pmcring_read(struct seq_file *s, void *data)
{
	struct wil6210_priv *wil = s->private;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t pmc_ring_size =
		sizeof(struct vring_rx_desc) * pmc->num_descriptors;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	wil_dbg_misc(wil, "pmcring_read: size %zu\n", pmc_ring_size);

	seq_write(s, pmc->pring_va, pmc_ring_size);

	mutex_unlock(&pmc->lock);

	return 0;
}
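
/* Usage sketch (illustrative; the fops name below is hypothetical and not
 * defined in this file): wil_pmc_read and wil_pmc_llseek have
 * file_operations-compatible signatures, so the captured data can be
 * exposed as a debugfs node roughly like:
 *
 *	static const struct file_operations fops_pmcdata = {
 *		.open	= simple_open,
 *		.read	= wil_pmc_read,
 *		.llseek	= wil_pmc_llseek,
 *	};
 *
 * wil_pmcring_read() likewise fits the show callback of a seq_file
 * single_open() based node that dumps the raw p-ring.
 */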