/*
 * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include "wmi.h"
#include "wil6210.h"
#include "txrx.h"
#include "pmc.h"

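/* PMC lets the device DMA captured debug data into host memory: this file
 * manages the descriptor ring and per-descriptor buffers, and the debugfs
 * read/llseek handlers below expose the captured data to user space.
 */
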
struct desc_alloc_info {
	dma_addr_t pa;
	void	  *va;
};

static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
{
	return !!pmc->pring_va;
}

void wil_pmc_init(struct wil6210_priv *wil)
{
	memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
	mutex_init(&wil->pmc.lock);
}

/**
 * Allocate the physical ring (p-ring) and the required
 * number of descriptors of required size.
 * Initialize the descriptors as required by pmc dma.
 * The descriptors' buffers dwords are initialized to hold
 * dword's serial number in the lsw and reserved value
 * PCM_DATA_INVALID_DW_VAL in the msw.
 */
void wil_pmc_alloc(struct wil6210_priv *wil,
		   int num_descriptors,
		   int descriptor_size)
{
	u32 i;
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wmi_pmc_cmd pmc_cmd = {0};
	int last_cmd_err = -ENOMEM;

	mutex_lock(&pmc->lock);

	if (wil_is_pmc_allocated(pmc)) {
		/* sanity check */
		wil_err(wil, "ERROR pmc is already allocated\n");
		goto no_release_err;
	}
	if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
		wil_err(wil,
			"Invalid params num_descriptors(%d), descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
		wil_err(wil,
			"num_descriptors(%d) exceeds max ring size %d\n",
			num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > INT_MAX / descriptor_size) {
		wil_err(wil,
			"Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	pmc->num_descriptors = num_descriptors;
	pmc->descriptor_size = descriptor_size;

	wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
		     num_descriptors, descriptor_size);

	/* allocate descriptors info list in pmc context */
	pmc->descriptors = kcalloc(num_descriptors,
				   sizeof(struct desc_alloc_info),
				   GFP_KERNEL);
	if (!pmc->descriptors) {
		wil_err(wil, "ERROR allocating pmc skb list\n");
		goto no_release_err;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
		     pmc->descriptors);

	/* Allocate pring buffer and descriptors.
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has limitation that all vrings addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
	 * if we are using more than 32 bit addresses switch to 32 bit
	 * allocation before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

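	/* The pring is an array of vring_tx_desc entries; the loop further
	 * below points each entry at one of the per-descriptor data buffers.
	 */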
	pmc->pring_va = dma_alloc_coherent(dev,
			sizeof(struct vring_tx_desc) * num_descriptors,
			&pmc->pring_pa,
			GFP_KERNEL);

	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev,
					  DMA_BIT_MASK(wil->dma_addr_size));

134 "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
135 pmc
->pring_va
, &pmc
->pring_pa
,
136 sizeof(struct vring_tx_desc
),
138 sizeof(struct vring_tx_desc
) * num_descriptors
);
	if (!pmc->pring_va) {
		wil_err(wil, "ERROR allocating pmc pring\n");
		goto release_pmc_skb_list;
	}

	/* initially, all descriptors are SW owned
	 * For Tx, Rx, and PMC, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < num_descriptors; i++) {
		struct vring_tx_desc *_d = &pmc->pring_va[i];
		struct vring_tx_desc dd = {}, *d = &dd;
		int j = 0;

		pmc->descriptors[i].va = dma_alloc_coherent(dev,
			descriptor_size,
			&pmc->descriptors[i].pa,
			GFP_KERNEL);

		if (unlikely(!pmc->descriptors[i].va)) {
			wil_err(wil, "ERROR allocating pmc descriptor %d", i);
			goto release_pmc_skbs;
		}

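		/* Pre-fill the buffer with a recognizable pattern: dword j
		 * holds its own index in the low word and the reserved
		 * PCM_DATA_INVALID_DW_VAL in the high word, e.g. dword 3
		 * reads back as PCM_DATA_INVALID_DW_VAL | 3 until HW
		 * overwrites it.
		 */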
		for (j = 0; j < descriptor_size / sizeof(u32); j++) {
			u32 *p = (u32 *)pmc->descriptors[i].va + j;

			*p = PCM_DATA_INVALID_DW_VAL | j;
		}

		/* configure dma descriptor */
		d->dma.addr.addr_low =
			cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
		d->dma.addr.addr_high =
			cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
		d->dma.status = 0; /* 0 = HW_OWNED */
		d->dma.length = cpu_to_le16(descriptor_size);
		d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;

		*_d = *d;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");

	pmc_cmd.op = WMI_PMC_ALLOCATE;
	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);

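	/* Hand the ring over to the firmware: once WMI_PMC_ALLOCATE is
	 * accepted, the device DMAs captured data into the HW-owned
	 * descriptor buffers set up above.
	 */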
	wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
	pmc->last_cmd_status = wmi_send(wil,
					WMI_PMC_CMDID,
					&pmc_cmd,
					sizeof(pmc_cmd));
	if (pmc->last_cmd_status) {
		wil_err(wil,
			"WMI_PMC_CMD with ALLOCATE op failed with status %d",
			pmc->last_cmd_status);
		goto release_pmc_skbs;
	}

	mutex_unlock(&pmc->lock);

	return;

release_pmc_skbs:
	wil_err(wil, "exit on error: Releasing skbs...\n");
	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
		dma_free_coherent(dev,
				  descriptor_size,
				  pmc->descriptors[i].va,
				  pmc->descriptors[i].pa);

		pmc->descriptors[i].va = NULL;
	}
	wil_err(wil, "exit on error: Releasing pring...\n");

	dma_free_coherent(dev,
			  sizeof(struct vring_tx_desc) * num_descriptors,
			  pmc->pring_va,
			  pmc->pring_pa);

	pmc->pring_va = NULL;

release_pmc_skb_list:
	wil_err(wil, "exit on error: Releasing descriptors info list...\n");
	kfree(pmc->descriptors);
	pmc->descriptors = NULL;

no_release_err:
	pmc->last_cmd_status = last_cmd_err;
	mutex_unlock(&pmc->lock);
}

/**
 * Traverse the p-ring and release all buffers.
 * At the end release the p-ring memory
 */
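/* When send_pmc_cmd is zero, the firmware is not notified (no WMI RELEASE
 * command is sent) and only host memory is reclaimed.
 */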
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wmi_pmc_cmd pmc_cmd = {0};

	mutex_lock(&pmc->lock);

	pmc->last_cmd_status = 0;

	if (!wil_is_pmc_allocated(pmc)) {
		wil_dbg_misc(wil,
			     "pmc_free: Error, can't free - not allocated\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return;
	}

	if (send_pmc_cmd) {
		wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
		pmc_cmd.op = WMI_PMC_RELEASE;
		pmc->last_cmd_status =
			wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
				 sizeof(pmc_cmd));
		if (pmc->last_cmd_status) {
			wil_err(wil,
				"WMI_PMC_CMD with RELEASE op failed, status %d",
				pmc->last_cmd_status);
			/* There's nothing we can do with this error.
			 * Normally, it should never occur.
			 * Continue to freeing all memory allocated for pmc.
			 */
		}
	}

	if (pmc->pring_va) {
		size_t buf_size = sizeof(struct vring_tx_desc) *
				  pmc->num_descriptors;

		wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
			     pmc->pring_va);
		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);

		pmc->pring_va = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	if (pmc->descriptors) {
		int i;

		for (i = 0;
		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
			dma_free_coherent(dev,
					  pmc->descriptor_size,
					  pmc->descriptors[i].va,
					  pmc->descriptors[i].pa);
			pmc->descriptors[i].va = NULL;
		}
		wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
			     pmc->num_descriptors);
297 "pmc_free: free pmc descriptors info list %p\n",
		kfree(pmc->descriptors);
		pmc->descriptors = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	mutex_unlock(&pmc->lock);
}

/**
 * Status of the last operation requested via debugfs: alloc/free/read.
 * 0 - success or negative errno
 */
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
	wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
		     wil->pmc.last_cmd_status);

	return wil->pmc.last_cmd_status;
}

/**
 * Read from required position up to the end of current descriptor,
 * depends on descriptor size configured during alloc request.
 */
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
		     loff_t *f_pos)
{
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t retval = 0;
	unsigned long long idx;
	loff_t offset;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	wil_dbg_misc(wil,
		     "pmc_read: size %u, pos %lld\n",
		     (u32)count, *f_pos);

	pmc->last_cmd_status = 0;

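	/* do_div() divides the u64 idx in place (safe on 32-bit hosts):
	 * idx becomes the descriptor index, and offset is then the byte
	 * position inside that descriptor.
	 */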
	idx = *f_pos;
	do_div(idx, pmc->descriptor_size);
	offset = *f_pos - (idx * pmc->descriptor_size);

	if (*f_pos >= pmc_size) {
		wil_dbg_misc(wil,
			     "pmc_read: reached end of pmc buf: %lld >= %u\n",
			     *f_pos, (u32)pmc_size);
		pmc->last_cmd_status = -ERANGE;
		goto out;
	}

364 "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
365 *f_pos
, idx
, offset
, count
);
	/* if no errors, return the copied byte count */
	retval = simple_read_from_buffer(buf,
					 count,
					 &offset,
					 pmc->descriptors[idx].va,
					 pmc->descriptor_size);
	*f_pos += retval;

out:
	mutex_unlock(&pmc->lock);

	return retval;
}

loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
{
	loff_t newpos;
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	switch (whence) {
	case 0: /* SEEK_SET */
		newpos = off;
		break;

	case 1: /* SEEK_CUR */
		newpos = filp->f_pos + off;
		break;

	case 2: /* SEEK_END */
		newpos = pmc_size;
		break;

	default: /* can't happen */
		newpos = -EINVAL;
		goto out;
	}

	if (newpos < 0) {
		newpos = -EINVAL;
		goto out;
	}
	if (newpos > pmc_size)
		newpos = pmc_size;

	filp->f_pos = newpos;

out:
	mutex_unlock(&pmc->lock);

	return newpos;
}