// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <asm/byteorder.h>
#include <linux/completion.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <uapi/drm/qaic_accel.h>

#include "qaic.h"

#define MANAGE_MAGIC_NUMBER		((__force __le32)0x43494151) /* "QAIC" in little endian */
#define QAIC_DBC_Q_GAP			SZ_256
#define QAIC_DBC_Q_BUF_ALIGN		SZ_4K
#define QAIC_MANAGE_EXT_MSG_LENGTH	SZ_64K /* Max DMA message length */
#define QAIC_WRAPPER_MAX_SIZE		SZ_4K
#define QAIC_MHI_RETRY_WAIT_MS		100
#define QAIC_MHI_RETRY_MAX		20
static unsigned int control_resp_timeout_s = 60; /* 60 sec default */
module_param(control_resp_timeout_s, uint, 0600);
MODULE_PARM_DESC(control_resp_timeout_s, "Timeout for NNC responses from QSM");
/*
 * Wire encoding structures for the manage protocol.
 * All fields are little endian on the wire
 */
struct wire_msg_hdr {
	__le32 crc32; /* crc of everything following this field in the message */
	__le32 magic_number;
	__le32 sequence_number;
	__le32 len; /* length of this message */
	__le32 count; /* number of transactions in this message */
	__le32 handle; /* unique id to track the resources consumed */
	__le32 partition_id; /* partition id for the request (signed) */
	__le32 padding; /* must be 0 */
} __packed;

struct wire_msg {
	struct wire_msg_hdr hdr;
	u8 data[];
} __packed;

struct wire_trans_hdr {
	__le32 type;
	__le32 len;
} __packed;
/* Each message sent from driver to device is organized in a list of wrapper_msg */
struct wrapper_msg {
	struct list_head list;
	struct kref ref_count;
	u32 len; /* length of data to transfer */
	struct wrapper_list *head;
	union {
		struct wire_msg msg;
		struct wire_trans_hdr trans;
	};
};

struct wrapper_list {
	struct list_head list;
	spinlock_t lock; /* Protects the list state during additions and removals */
};
struct wire_trans_passthrough {
	struct wire_trans_hdr hdr;
	u8 data[];
} __packed;

struct wire_addr_size_pair {
	__le64 addr;
	__le64 size;
} __packed;

struct wire_trans_dma_xfer {
	struct wire_trans_hdr hdr;
	__le32 tag;
	__le32 count;
	__le32 dma_chunk_id;
	__le32 padding;
	struct wire_addr_size_pair data[];
} __packed;

/* Initiated by device to continue the DMA xfer of a large piece of data */
struct wire_trans_dma_xfer_cont {
	struct wire_trans_hdr hdr;
	__le32 dma_chunk_id;
	__le32 padding;
	__le64 xferred_size;
} __packed;

struct wire_trans_activate_to_dev {
	struct wire_trans_hdr hdr;
	__le64 req_q_addr;
	__le64 rsp_q_addr;
	__le32 req_q_size;
	__le32 rsp_q_size;
	__le32 buf_len;
	__le32 options; /* unused, but BIT(16) has meaning to the device */
} __packed;

struct wire_trans_activate_from_dev {
	struct wire_trans_hdr hdr;
	__le32 status;
	__le32 dbc_id;
	__le64 options; /* unused */
} __packed;

struct wire_trans_deactivate_from_dev {
	struct wire_trans_hdr hdr;
	__le32 status;
	__le32 dbc_id;
} __packed;

struct wire_trans_terminate_to_dev {
	struct wire_trans_hdr hdr;
	__le32 handle;
	__le32 padding;
} __packed;

struct wire_trans_terminate_from_dev {
	struct wire_trans_hdr hdr;
	__le32 status;
} __packed;

struct wire_trans_status_to_dev {
	struct wire_trans_hdr hdr;
} __packed;

struct wire_trans_status_from_dev {
	struct wire_trans_hdr hdr;
	__le16 major;
	__le16 minor;
	__le32 status;
	__le64 status_flags;
} __packed;

struct wire_trans_validate_part_to_dev {
	struct wire_trans_hdr hdr;
	__le32 part_handle;
	__le32 padding;
} __packed;

struct wire_trans_validate_part_from_dev {
	struct wire_trans_hdr hdr;
	__le32 status;
} __packed;
struct xfer_queue_elem {
	/*
	 * Node in list of ongoing transfer request on control channel.
	 * Maintained by root device struct.
	 */
	struct list_head list;
	/* Sequence number of this transfer request */
	u32 seq_num;
	/* This is used to wait on until completion of transfer request */
	struct completion xfer_done;
	/* Received data from device */
	void *buf;
};

struct dma_xfer {
	/* Node in list of DMA transfers which is used for cleanup */
	struct list_head list;
	/* SG table of memory used for DMA */
	struct sg_table *sgt;
	/* Array of pages used for DMA */
	struct page **page_list;
	/* Number of pages used for DMA */
	unsigned long nr_pages;
};
struct ioctl_resources {
	/* List of all DMA transfers which is used later for cleanup */
	struct list_head dma_xfers;
	/* Base address of request queue which belongs to a DBC */
	void *buf;
	/*
	 * Base bus address of request queue which belongs to a DBC. Response
	 * queue base bus address can be calculated by adding size of request
	 * queue to base bus address of request queue.
	 */
	dma_addr_t dma_addr;
	/* Total size of request queue and response queue in bytes */
	u32 total_size;
	/* Total number of elements that can be queued in each of request and response queue */
	u32 nelem;
	/* Base address of response queue which belongs to a DBC */
	void *rsp_q_base;
	/* Status of the NNC message received */
	u32 status;
	/* DBC id of the DBC received from device */
	u32 dbc_id;
	/*
	 * DMA transfer request messages can be big in size and it may not be
	 * possible to send them in one shot. In such cases the messages are
	 * broken into chunks, this field stores ID of such chunks.
	 */
	u32 dma_chunk_id;
	/* Total number of bytes transferred for a DMA xfer request */
	u64 xferred_dma_size;
	/* Header of transaction message received from user. Used during DMA xfer request. */
	void *trans_hdr;
};

struct resp_work {
	struct work_struct work;
	struct qaic_device *qdev;
	void *buf;
};
/*
 * Since we're working with little endian messages, it's useful to be able to
 * increment without filling a whole line with conversions back and forth just
 * to add one (1) to a message count.
 */
static __le32 incr_le32(__le32 val)
{
	return cpu_to_le32(le32_to_cpu(val) + 1);
}
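/*
 * Illustrative use (taken from the encode helpers below):
 *   msg->hdr.count = incr_le32(msg->hdr.count);
 * bumps the little endian transaction count by one in place.
 */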
static u32 gen_crc(void *msg)
{
	struct wrapper_list *wrappers = msg;
	struct wrapper_msg *w;
	u32 crc = ~0;

	list_for_each_entry(w, &wrappers->list, list)
		crc = crc32(crc, &w->msg, w->len);

	return crc ^ ~0;
}
static u32 gen_crc_stub(void *msg)
{
	return 0;
}
static bool valid_crc(void *msg)
{
	struct wire_msg_hdr *hdr = msg;
	bool ret;
	u32 crc;

	/*
	 * The output of this algorithm is always converted to the native
	 * endianness.
	 */
	crc = le32_to_cpu(hdr->crc32);
	hdr->crc32 = 0;
	ret = (crc32(~0, msg, le32_to_cpu(hdr->len)) ^ ~0) == crc;
	hdr->crc32 = cpu_to_le32(crc);
	return ret;
}
static bool valid_crc_stub(void *msg)
{
	return true;
}
static void free_wrapper(struct kref *ref)
{
	struct wrapper_msg *wrapper = container_of(ref, struct wrapper_msg, ref_count);

	list_del(&wrapper->list);
	kfree(wrapper);
}
static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources,
			 struct qaic_user *usr)
{
	u32 dbc_id = resources->dbc_id;

	if (resources->buf) {
		wait_event_interruptible(qdev->dbc[dbc_id].dbc_release, !qdev->dbc[dbc_id].in_use);
		qdev->dbc[dbc_id].req_q_base = resources->buf;
		qdev->dbc[dbc_id].rsp_q_base = resources->rsp_q_base;
		qdev->dbc[dbc_id].dma_addr = resources->dma_addr;
		qdev->dbc[dbc_id].total_size = resources->total_size;
		qdev->dbc[dbc_id].nelem = resources->nelem;
		enable_dbc(qdev, dbc_id, usr);
		qdev->dbc[dbc_id].in_use = true;
		resources->buf = NULL;
	}
}
static void free_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources)
{
	if (resources->buf)
		dma_free_coherent(&qdev->pdev->dev, resources->total_size, resources->buf,
				  resources->dma_addr);
	resources->buf = NULL;
}
static void free_dma_xfers(struct qaic_device *qdev, struct ioctl_resources *resources)
{
	struct dma_xfer *xfer;
	struct dma_xfer *x;
	int i;

	list_for_each_entry_safe(xfer, x, &resources->dma_xfers, list) {
		dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
		sg_free_table(xfer->sgt);
		kfree(xfer->sgt);
		for (i = 0; i < xfer->nr_pages; ++i)
			put_page(xfer->page_list[i]);
		kfree(xfer->page_list);
		list_del(&xfer->list);
		kfree(xfer);
	}
}
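/*
 * Allocate a new wrapper, add it to the tail of the message's wrapper list,
 * and take the initial reference on it.
 */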
static struct wrapper_msg *add_wrapper(struct wrapper_list *wrappers, u32 size)
{
	struct wrapper_msg *w = kzalloc(size, GFP_KERNEL);

	if (!w)
		return NULL;
	list_add_tail(&w->list, &wrappers->list);
	kref_init(&w->ref_count);
	w->head = wrappers;
	return w;
}
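/*
 * Append a passthrough transaction from userspace to the wire message as-is,
 * updating the message header length and transaction count.
 */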
static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
			      u32 *user_len)
{
	struct qaic_manage_trans_passthrough *in_trans = trans;
	struct wire_trans_passthrough *out_trans;
	struct wrapper_msg *trans_wrapper;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	u32 msg_hdr_len;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;
	msg_hdr_len = le32_to_cpu(msg->hdr.len);

	if (in_trans->hdr.len % 8 != 0)
		return -EINVAL;

	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_EXT_MSG_LENGTH)
		return -ENOSPC;

	trans_wrapper = add_wrapper(wrappers,
				    offsetof(struct wrapper_msg, trans) + in_trans->hdr.len);
	if (!trans_wrapper)
		return -ENOMEM;

	trans_wrapper->len = in_trans->hdr.len;
	out_trans = (struct wire_trans_passthrough *)&trans_wrapper->trans;

	memcpy(out_trans->data, in_trans->data, in_trans->hdr.len - sizeof(in_trans->hdr));
	msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
	msg->hdr.count = incr_le32(msg->hdr.count);
	*user_len += in_trans->hdr.len;
	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_PASSTHROUGH_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);

	return 0;
}
/* returns error code for failure, 0 if enough pages alloc'd, 1 if dma_cont is needed */
static int find_and_map_user_pages(struct qaic_device *qdev,
				   struct qaic_manage_trans_dma_xfer *in_trans,
				   struct ioctl_resources *resources, struct dma_xfer *xfer)
{
	u64 xfer_start_addr, remaining, end, total;
	unsigned long need_pages;
	struct page **page_list;
	unsigned long nr_pages;
	struct sg_table *sgt;
	int ret;
	int i;

	if (check_add_overflow(in_trans->addr, resources->xferred_dma_size, &xfer_start_addr))
		return -EINVAL;

	if (in_trans->size < resources->xferred_dma_size)
		return -EINVAL;
	remaining = in_trans->size - resources->xferred_dma_size;
	if (remaining == 0)
		return 0;

	if (check_add_overflow(xfer_start_addr, remaining, &end))
		return -EINVAL;

	total = remaining + offset_in_page(xfer_start_addr);
	if (total >= SIZE_MAX)
		return -EINVAL;

	need_pages = DIV_ROUND_UP(total, PAGE_SIZE);

	nr_pages = need_pages;

	while (1) {
		page_list = kmalloc_array(nr_pages, sizeof(*page_list), GFP_KERNEL | __GFP_NOWARN);
		if (!page_list) {
			nr_pages = nr_pages / 2;
			if (!nr_pages)
				return -ENOMEM;
		} else {
			break;
		}
	}

	ret = get_user_pages_fast(xfer_start_addr, nr_pages, 0, page_list);
	if (ret < 0)
		goto free_page_list;
	if (ret != nr_pages) {
		nr_pages = ret;
		ret = -EFAULT;
		goto put_pages;
	}

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto put_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages,
					offset_in_page(xfer_start_addr),
					remaining, GFP_KERNEL);
	if (ret) {
		ret = -ENOMEM;
		goto free_sgt;
	}

	ret = dma_map_sgtable(&qdev->pdev->dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		goto free_table;

	xfer->sgt = sgt;
	xfer->page_list = page_list;
	xfer->nr_pages = nr_pages;

	return need_pages > nr_pages ? 1 : 0;

free_table:
	sg_free_table(sgt);
free_sgt:
	kfree(sgt);
put_pages:
	for (i = 0; i < nr_pages; ++i)
		put_page(page_list[i]);
free_page_list:
	kfree(page_list);
	return ret;
}
/* returns error code for failure, 0 if everything was encoded, 1 if dma_cont is needed */
static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wrappers,
				  struct ioctl_resources *resources, u32 msg_hdr_len, u32 *size,
				  struct wire_trans_dma_xfer **out_trans)
{
	struct wrapper_msg *trans_wrapper;
	struct sg_table *sgt = xfer->sgt;
	struct wire_addr_size_pair *asp;
	struct scatterlist *sg;
	struct wrapper_msg *w;
	unsigned int dma_len;
	u64 dma_chunk_len;
	void *boundary;
	int nents_dma;
	int nents;
	int i;

	nents = sgt->nents;
	nents_dma = nents;
	*size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
	for_each_sgtable_dma_sg(sgt, sg, i) {
		*size -= sizeof(*asp);
		/* Save 1K for possible follow-up transactions. */
		if (*size < SZ_1K) {
			nents_dma = i;
			break;
		}
	}

	trans_wrapper = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
	if (!trans_wrapper)
		return -ENOMEM;
	*out_trans = (struct wire_trans_dma_xfer *)&trans_wrapper->trans;

	asp = (*out_trans)->data;
	boundary = (void *)trans_wrapper + QAIC_WRAPPER_MAX_SIZE;
	*size = 0;

	dma_len = 0;
	w = trans_wrapper;
	dma_chunk_len = 0;
	for_each_sg(sgt->sgl, sg, nents_dma, i) {
		asp->size = cpu_to_le64(dma_len);
		dma_chunk_len += dma_len;
		if (dma_len) {
			asp++;
			if ((void *)asp + sizeof(*asp) > boundary) {
				w->len = (void *)asp - (void *)&w->msg;
				*size += w->len;
				w = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
				if (!w)
					return -ENOMEM;
				boundary = (void *)w + QAIC_WRAPPER_MAX_SIZE;
				asp = (struct wire_addr_size_pair *)&w->msg;
			}
		}
		asp->addr = cpu_to_le64(sg_dma_address(sg));
		dma_len = sg_dma_len(sg);
	}
	/* finalize the last segment */
	asp->size = cpu_to_le64(dma_len);
	w->len = (void *)asp + sizeof(*asp) - (void *)&w->msg;
	*size += w->len;
	dma_chunk_len += dma_len;
	resources->xferred_dma_size += dma_chunk_len;

	return nents_dma < nents ? 1 : 0;
}
static void cleanup_xfer(struct qaic_device *qdev, struct dma_xfer *xfer)
{
	int i;

	dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
	sg_free_table(xfer->sgt);
	kfree(xfer->sgt);
	for (i = 0; i < xfer->nr_pages; ++i)
		put_page(xfer->page_list[i]);
	kfree(xfer->page_list);
}
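/*
 * Encode a userspace DMA transfer transaction: pin and map the user buffer,
 * emit the address/size pairs on the wire, and set up chunked continuation
 * (dma_chunk_id) when the whole buffer does not fit in one message.
 */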
static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
		      u32 *user_len, struct ioctl_resources *resources, struct qaic_user *usr)
{
	struct qaic_manage_trans_dma_xfer *in_trans = trans;
	struct wire_trans_dma_xfer *out_trans;
	struct wrapper_msg *wrapper;
	struct dma_xfer *xfer;
	struct wire_msg *msg;
	bool need_cont_dma = false;
	u32 msg_hdr_len;
	u32 size;
	int ret;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;
	msg_hdr_len = le32_to_cpu(msg->hdr.len);

	/* There should be enough space to hold at least one ASP entry. */
	if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) >
	    QAIC_MANAGE_EXT_MSG_LENGTH)
		return -ENOMEM;

	xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
	if (!xfer)
		return -ENOMEM;

	ret = find_and_map_user_pages(qdev, in_trans, resources, xfer);
	if (ret < 0)
		goto free_xfer;

	need_cont_dma = (bool)ret;

	ret = encode_addr_size_pairs(xfer, wrappers, resources, msg_hdr_len, &size, &out_trans);
	if (ret < 0)
		goto cleanup_xfer;

	need_cont_dma = need_cont_dma || (bool)ret;

	msg->hdr.len = cpu_to_le32(msg_hdr_len + size);
	msg->hdr.count = incr_le32(msg->hdr.count);

	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(size);
	out_trans->tag = cpu_to_le32(in_trans->tag);
	out_trans->count = cpu_to_le32((size - sizeof(*out_trans)) /
				       sizeof(struct wire_addr_size_pair));

	*user_len += in_trans->hdr.len;

	if (resources->dma_chunk_id) {
		out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
	} else if (need_cont_dma) {
		while (resources->dma_chunk_id == 0)
			resources->dma_chunk_id = atomic_inc_return(&usr->chunk_id);

		out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
	}
	resources->trans_hdr = trans;

	list_add(&xfer->list, &resources->dma_xfers);
	return 0;

cleanup_xfer:
	cleanup_xfer(qdev, xfer);
free_xfer:
	kfree(xfer);
	return ret;
}
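/*
 * Encode an activate request: allocate the DMA coherent buffer that backs the
 * DBC request/response queues and describe it to the device.
 */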
static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
			   u32 *user_len, struct ioctl_resources *resources)
{
	struct qaic_manage_trans_activate_to_dev *in_trans = trans;
	struct wire_trans_activate_to_dev *out_trans;
	struct wrapper_msg *trans_wrapper;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	dma_addr_t dma_addr;
	u32 msg_hdr_len;
	void *buf;
	u32 nelem;
	u32 size;
	int ret;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;
	msg_hdr_len = le32_to_cpu(msg->hdr.len);

	if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -ENOSPC;

	if (!in_trans->queue_size)
		return -EINVAL;

	if (in_trans->pad)
		return -EINVAL;

	nelem = in_trans->queue_size;
	size = (get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) * nelem;
	if (size / nelem != get_dbc_req_elem_size() + get_dbc_rsp_elem_size())
		return -EINVAL;

	if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size)
		return -EINVAL;

	size = ALIGN((size + QAIC_DBC_Q_GAP), QAIC_DBC_Q_BUF_ALIGN);

	buf = dma_alloc_coherent(&qdev->pdev->dev, size, &dma_addr, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	trans_wrapper = add_wrapper(wrappers,
				    offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
	if (!trans_wrapper) {
		ret = -ENOMEM;
		goto free_dma;
	}

	trans_wrapper->len = sizeof(*out_trans);
	out_trans = (struct wire_trans_activate_to_dev *)&trans_wrapper->trans;

	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_ACTIVATE_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(sizeof(*out_trans));
	out_trans->buf_len = cpu_to_le32(size);
	out_trans->req_q_addr = cpu_to_le64(dma_addr);
	out_trans->req_q_size = cpu_to_le32(nelem);
	out_trans->rsp_q_addr = cpu_to_le64(dma_addr + size - nelem * get_dbc_rsp_elem_size());
	out_trans->rsp_q_size = cpu_to_le32(nelem);
	out_trans->options = cpu_to_le32(in_trans->options);

	*user_len += in_trans->hdr.len;
	msg->hdr.len = cpu_to_le32(msg_hdr_len + sizeof(*out_trans));
	msg->hdr.count = incr_le32(msg->hdr.count);

	resources->buf = buf;
	resources->dma_addr = dma_addr;
	resources->total_size = size;
	resources->nelem = nelem;
	resources->rsp_q_base = buf + size - nelem * get_dbc_rsp_elem_size();
	return 0;

free_dma:
	dma_free_coherent(&qdev->pdev->dev, size, buf, dma_addr);
	return ret;
}
static int encode_deactivate(struct qaic_device *qdev, void *trans,
			     u32 *user_len, struct qaic_user *usr)
{
	struct qaic_manage_trans_deactivate *in_trans = trans;

	if (in_trans->dbc_id >= qdev->num_dbc || in_trans->pad)
		return -EINVAL;

	*user_len += in_trans->hdr.len;

	return disable_dbc(qdev, in_trans->dbc_id, usr);
}
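/*
 * Encode a status query. Nothing beyond the transaction header is sent; the
 * device answers with its protocol version and feature flags.
 */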
static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
			 u32 *user_len)
{
	struct qaic_manage_trans_status_to_dev *in_trans = trans;
	struct wire_trans_status_to_dev *out_trans;
	struct wrapper_msg *trans_wrapper;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	u32 msg_hdr_len;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;
	msg_hdr_len = le32_to_cpu(msg->hdr.len);

	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -ENOSPC;

	trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
	if (!trans_wrapper)
		return -ENOMEM;

	trans_wrapper->len = sizeof(*out_trans);
	out_trans = (struct wire_trans_status_to_dev *)&trans_wrapper->trans;

	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_STATUS_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);
	msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
	msg->hdr.count = incr_le32(msg->hdr.count);
	*user_len += in_trans->hdr.len;

	return 0;
}
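/*
 * Walk the transactions supplied by userspace and encode each one into the
 * wire message. A pending DMA continuation (dma_chunk_id set) bypasses the
 * user transactions and re-encodes the saved DMA transfer header instead.
 */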
static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
			  struct wrapper_list *wrappers, struct ioctl_resources *resources,
			  struct qaic_user *usr)
{
	struct qaic_manage_trans_hdr *trans_hdr;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	u32 user_len = 0;
	int ret;
	int i;

	if (!user_msg->count ||
	    user_msg->len < sizeof(*trans_hdr)) {
		ret = -EINVAL;
		goto out;
	}

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;

	msg->hdr.len = cpu_to_le32(sizeof(msg->hdr));

	if (resources->dma_chunk_id) {
		ret = encode_dma(qdev, resources->trans_hdr, wrappers, &user_len, resources, usr);
		msg->hdr.count = cpu_to_le32(1);
		goto out;
	}

	for (i = 0; i < user_msg->count; ++i) {
		if (user_len > user_msg->len - sizeof(*trans_hdr)) {
			ret = -EINVAL;
			break;
		}
		trans_hdr = (struct qaic_manage_trans_hdr *)(user_msg->data + user_len);
		if (trans_hdr->len < sizeof(trans_hdr) ||
		    size_add(user_len, trans_hdr->len) > user_msg->len) {
			ret = -EINVAL;
			break;
		}

		switch (trans_hdr->type) {
		case QAIC_TRANS_PASSTHROUGH_FROM_USR:
			ret = encode_passthrough(qdev, trans_hdr, wrappers, &user_len);
			break;
		case QAIC_TRANS_DMA_XFER_FROM_USR:
			ret = encode_dma(qdev, trans_hdr, wrappers, &user_len, resources, usr);
			break;
		case QAIC_TRANS_ACTIVATE_FROM_USR:
			ret = encode_activate(qdev, trans_hdr, wrappers, &user_len, resources);
			break;
		case QAIC_TRANS_DEACTIVATE_FROM_USR:
			ret = encode_deactivate(qdev, trans_hdr, &user_len, usr);
			break;
		case QAIC_TRANS_STATUS_FROM_USR:
			ret = encode_status(qdev, trans_hdr, wrappers, &user_len);
			break;
		default:
			ret = -EINVAL;
			break;
		}

		if (ret)
			break;
	}

	if (user_len != user_msg->len)
		ret = -EINVAL;
out:
	if (ret) {
		free_dma_xfers(qdev, resources);
		free_dbc_buf(qdev, resources);
		return ret;
	}

	return 0;
}
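/*
 * Decode helpers: convert the little endian wire response from the device
 * back into the CPU-endian layout expected by userspace.
 */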
static int decode_passthrough(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
			      u32 *msg_len)
{
	struct qaic_manage_trans_passthrough *out_trans;
	struct wire_trans_passthrough *in_trans = trans;
	u32 len;

	out_trans = (void *)user_msg->data + user_msg->len;

	len = le32_to_cpu(in_trans->hdr.len);
	if (len % 8 != 0)
		return -EINVAL;

	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -EINVAL;

	memcpy(out_trans->data, in_trans->data, len - sizeof(in_trans->hdr));
	user_msg->len += len;
	*msg_len += len;
	out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
	out_trans->hdr.len = len;

	return 0;
}
static int decode_activate(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
			   u32 *msg_len, struct ioctl_resources *resources, struct qaic_user *usr)
{
	struct qaic_manage_trans_activate_from_dev *out_trans;
	struct wire_trans_activate_from_dev *in_trans = trans;
	u32 len;

	out_trans = (void *)user_msg->data + user_msg->len;

	len = le32_to_cpu(in_trans->hdr.len);
	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -EINVAL;

	user_msg->len += len;
	*msg_len += len;
	out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
	out_trans->hdr.len = len;
	out_trans->status = le32_to_cpu(in_trans->status);
	out_trans->dbc_id = le32_to_cpu(in_trans->dbc_id);
	out_trans->options = le64_to_cpu(in_trans->options);

	if (!resources->buf)
		/* how did we get an activate response without a request? */
		return -EINVAL;

	if (out_trans->dbc_id >= qdev->num_dbc)
		/*
		 * The device assigned an invalid resource, which should never
		 * happen. Return an error so the user can try to recover.
		 */
		return -ENODEV;

	if (out_trans->status)
		/*
		 * Allocating resources failed on device side. This is not an
		 * expected behaviour, user is expected to handle this situation.
		 */
		return -ECANCELED;

	resources->status = out_trans->status;
	resources->dbc_id = out_trans->dbc_id;
	save_dbc_buf(qdev, resources, usr);

	return 0;
}
static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len,
			     struct qaic_user *usr)
{
	struct wire_trans_deactivate_from_dev *in_trans = trans;
	u32 dbc_id = le32_to_cpu(in_trans->dbc_id);
	u32 status = le32_to_cpu(in_trans->status);

	if (dbc_id >= qdev->num_dbc)
		/*
		 * The device assigned an invalid resource, which should never
		 * happen. Inject an error so the user can try to recover.
		 */
		return -ENODEV;

	if (status) {
		/*
		 * Releasing resources failed on the device side, which puts
		 * us in a bind since they may still be in use, so enable the
		 * dbc. User is expected to retry deactivation.
		 */
		enable_dbc(qdev, dbc_id, usr);
		return -ECANCELED;
	}

	release_dbc(qdev, dbc_id);
	*msg_len += sizeof(*in_trans);

	return 0;
}
static int decode_status(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
			 u32 *user_len, struct wire_msg *msg)
{
	struct qaic_manage_trans_status_from_dev *out_trans;
	struct wire_trans_status_from_dev *in_trans = trans;
	u32 len;

	out_trans = (void *)user_msg->data + user_msg->len;

	len = le32_to_cpu(in_trans->hdr.len);
	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -EINVAL;

	out_trans->hdr.type = QAIC_TRANS_STATUS_FROM_DEV;
	out_trans->hdr.len = len;
	out_trans->major = le16_to_cpu(in_trans->major);
	out_trans->minor = le16_to_cpu(in_trans->minor);
	out_trans->status_flags = le64_to_cpu(in_trans->status_flags);
	out_trans->status = le32_to_cpu(in_trans->status);
	*user_len += le32_to_cpu(in_trans->hdr.len);
	user_msg->len += len;

	if (out_trans->status)
		return -ECANCELED;
	if (out_trans->status_flags & BIT(0) && !valid_crc(msg))
		return -EPIPE;

	return 0;
}
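/*
 * Validate the device's response header and walk each wire transaction,
 * dispatching to the decode helper that matches its type.
 */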
static int decode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
			  struct wire_msg *msg, struct ioctl_resources *resources,
			  struct qaic_user *usr)
{
	u32 msg_hdr_len = le32_to_cpu(msg->hdr.len);
	struct wire_trans_hdr *trans_hdr;
	u32 msg_len = 0;
	u32 hdr_len;
	int ret;
	int i;

	if (msg_hdr_len < sizeof(*trans_hdr) ||
	    msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -EINVAL;

	user_msg->len = 0;
	user_msg->count = le32_to_cpu(msg->hdr.count);

	for (i = 0; i < user_msg->count; ++i) {
		if (msg_len > msg_hdr_len - sizeof(*trans_hdr))
			return -EINVAL;

		trans_hdr = (struct wire_trans_hdr *)(msg->data + msg_len);
		hdr_len = le32_to_cpu(trans_hdr->len);
		if (hdr_len < sizeof(*trans_hdr) ||
		    size_add(msg_len, hdr_len) > msg_hdr_len)
			return -EINVAL;

		switch (le32_to_cpu(trans_hdr->type)) {
		case QAIC_TRANS_PASSTHROUGH_FROM_DEV:
			ret = decode_passthrough(qdev, trans_hdr, user_msg, &msg_len);
			break;
		case QAIC_TRANS_ACTIVATE_FROM_DEV:
			ret = decode_activate(qdev, trans_hdr, user_msg, &msg_len, resources, usr);
			break;
		case QAIC_TRANS_DEACTIVATE_FROM_DEV:
			ret = decode_deactivate(qdev, trans_hdr, &msg_len, usr);
			break;
		case QAIC_TRANS_STATUS_FROM_DEV:
			ret = decode_status(qdev, trans_hdr, user_msg, &msg_len, msg);
			break;
		default:
			return -EINVAL;
		}

		if (ret)
			return ret;
	}

	if (msg_len != (msg_hdr_len - sizeof(msg->hdr)))
		return -EINVAL;

	return 0;
}
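/*
 * Queue the encoded wrappers on the MHI control channel, post a receive
 * buffer for the response, and wait (with a timeout) for the device to
 * answer. Called with cntl_mutex held; the mutex is released on return.
 */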
static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 seq_num,
		      bool sync_msg)
{
	struct xfer_queue_elem elem;
	struct wire_msg *out_buf;
	struct wrapper_msg *w;
	long ret = -EAGAIN;
	int xfer_count = 0;
	int retry_count;

	/* Allow QAIC_BOOT state since we need to check control protocol version */
	if (qdev->dev_state == QAIC_OFFLINE) {
		mutex_unlock(&qdev->cntl_mutex);
		return ERR_PTR(-ENODEV);
	}

	/* Attempt to avoid a partial commit of a message */
	list_for_each_entry(w, &wrappers->list, list)
		xfer_count++;

	for (retry_count = 0; retry_count < QAIC_MHI_RETRY_MAX; retry_count++) {
		if (xfer_count <= mhi_get_free_desc_count(qdev->cntl_ch, DMA_TO_DEVICE)) {
			ret = 0;
			break;
		}
		msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS);
		if (signal_pending(current))
			break;
	}

	if (ret) {
		mutex_unlock(&qdev->cntl_mutex);
		return ERR_PTR(ret);
	}

	elem.seq_num = seq_num;
	elem.buf = NULL;
	init_completion(&elem.xfer_done);
	if (likely(!qdev->cntl_lost_buf)) {
		/*
		 * The max size of request to device is QAIC_MANAGE_EXT_MSG_LENGTH.
		 * The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH.
		 */
		out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL);
		if (!out_buf) {
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(-ENOMEM);
		}

		ret = mhi_queue_buf(qdev->cntl_ch, DMA_FROM_DEVICE, out_buf,
				    QAIC_MANAGE_MAX_MSG_LENGTH, MHI_EOT);
		if (ret) {
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(ret);
		}
	} else {
		/*
		 * we lost a buffer because we queued a recv buf, but then
		 * queuing the corresponding tx buf failed. To try to avoid
		 * a memory leak, let's reclaim it and use it for this
		 * transaction.
		 */
		qdev->cntl_lost_buf = false;
	}

	list_for_each_entry(w, &wrappers->list, list) {
		kref_get(&w->ref_count);

		ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
				    list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
		if (ret) {
			qdev->cntl_lost_buf = true;
			kref_put(&w->ref_count, free_wrapper);
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(ret);
		}
	}

	list_add_tail(&elem.list, &qdev->cntl_xfer_list);
	mutex_unlock(&qdev->cntl_mutex);

	if (sync_msg)
		ret = wait_for_completion_timeout(&elem.xfer_done, control_resp_timeout_s * HZ);
	else
		ret = wait_for_completion_interruptible_timeout(&elem.xfer_done,
								control_resp_timeout_s * HZ);
	/*
	 * not using _interruptible because we have to cleanup or we'll
	 * likely cause memory corruption
	 */
	mutex_lock(&qdev->cntl_mutex);
	if (!list_empty(&elem.list))
		list_del(&elem.list);
	if (!ret && !elem.buf)
		ret = -ETIMEDOUT;
	else if (ret > 0 && !elem.buf)
		ret = -ENOMSG;
	mutex_unlock(&qdev->cntl_mutex);

	if (ret < 0) {
		kfree(elem.buf);
		return ERR_PTR(ret);
	} else if (!qdev->valid_crc(elem.buf)) {
		kfree(elem.buf);
		return ERR_PTR(-EPIPE);
	}

	return elem.buf;
}
/* Add a transaction to abort the outstanding DMA continuation */
static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 dma_chunk_id)
{
	struct wire_trans_dma_xfer *out_trans;
	u32 size = sizeof(*out_trans);
	struct wrapper_msg *wrapper;
	struct wrapper_msg *w;
	struct wire_msg *msg;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;

	/* Remove all but the first wrapper which has the msg header */
	list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
		if (!list_is_first(&wrapper->list, &wrappers->list))
			kref_put(&wrapper->ref_count, free_wrapper);

	wrapper = add_wrapper(wrappers, sizeof(*wrapper));
	if (!wrapper)
		return -ENOMEM;

	out_trans = (struct wire_trans_dma_xfer *)&wrapper->trans;
	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(size);
	out_trans->tag = cpu_to_le32(0);
	out_trans->count = cpu_to_le32(0);
	out_trans->dma_chunk_id = cpu_to_le32(dma_chunk_id);

	msg->hdr.len = cpu_to_le32(size + sizeof(*msg));
	msg->hdr.count = cpu_to_le32(1);
	wrapper->len = size;

	return 0;
}
static struct wrapper_list *alloc_wrapper_list(void)
{
	struct wrapper_list *wrappers;

	wrappers = kmalloc(sizeof(*wrappers), GFP_KERNEL);
	if (!wrappers)
		return NULL;
	INIT_LIST_HEAD(&wrappers->list);
	spin_lock_init(&wrappers->lock);

	return wrappers;
}
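/*
 * Build the wire message for one manage request, transfer it to the device,
 * and hand the raw response back to the caller in *rsp.
 */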
static int qaic_manage_msg_xfer(struct qaic_device *qdev, struct qaic_user *usr,
				struct manage_msg *user_msg, struct ioctl_resources *resources,
				struct wire_msg **rsp)
{
	struct wrapper_list *wrappers;
	struct wrapper_msg *wrapper;
	struct wrapper_msg *w;
	bool all_done = false;
	struct wire_msg *msg;
	int ret;

	wrappers = alloc_wrapper_list();
	if (!wrappers)
		return -ENOMEM;

	wrapper = add_wrapper(wrappers, sizeof(*wrapper));
	if (!wrapper) {
		kfree(wrappers);
		return -ENOMEM;
	}

	msg = &wrapper->msg;
	wrapper->len = sizeof(*msg);

	ret = encode_message(qdev, user_msg, wrappers, resources, usr);
	if (ret && resources->dma_chunk_id)
		ret = abort_dma_cont(qdev, wrappers, resources->dma_chunk_id);
	if (ret)
		goto encode_failed;

	ret = mutex_lock_interruptible(&qdev->cntl_mutex);
	if (ret)
		goto lock_failed;

	msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
	msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);

	if (usr) {
		msg->hdr.handle = cpu_to_le32(usr->handle);
		msg->hdr.partition_id = cpu_to_le32(usr->qddev->partition_id);
	} else {
		msg->hdr.handle = 0;
		msg->hdr.partition_id = cpu_to_le32(QAIC_NO_PARTITION);
	}

	msg->hdr.padding = cpu_to_le32(0);
	msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));

	/* msg_xfer releases the mutex */
	*rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, false);
	if (IS_ERR(*rsp))
		ret = PTR_ERR(*rsp);

lock_failed:
	free_dma_xfers(qdev, resources);
encode_failed:
	spin_lock(&wrappers->lock);
	list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
		kref_put(&wrapper->ref_count, free_wrapper);
	all_done = list_empty(&wrappers->list);
	spin_unlock(&wrappers->lock);
	if (all_done)
		kfree(wrappers);

	return ret;
}
static int qaic_manage(struct qaic_device *qdev, struct qaic_user *usr, struct manage_msg *user_msg)
{
	struct wire_trans_dma_xfer_cont *dma_cont = NULL;
	struct ioctl_resources resources;
	struct wire_msg *rsp = NULL;
	int ret;

	memset(&resources, 0, sizeof(struct ioctl_resources));

	INIT_LIST_HEAD(&resources.dma_xfers);

	if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH ||
	    user_msg->count > QAIC_MANAGE_MAX_MSG_LENGTH / sizeof(struct qaic_manage_trans_hdr))
		return -EINVAL;

dma_xfer_continue:
	ret = qaic_manage_msg_xfer(qdev, usr, user_msg, &resources, &rsp);
	if (ret)
		return ret;
	/* dma_cont should be the only transaction if present */
	if (le32_to_cpu(rsp->hdr.count) == 1) {
		dma_cont = (struct wire_trans_dma_xfer_cont *)rsp->data;
		if (le32_to_cpu(dma_cont->hdr.type) != QAIC_TRANS_DMA_XFER_CONT)
			dma_cont = NULL;
	}
	if (dma_cont) {
		if (le32_to_cpu(dma_cont->dma_chunk_id) == resources.dma_chunk_id &&
		    le64_to_cpu(dma_cont->xferred_size) == resources.xferred_dma_size) {
			kfree(rsp);
			goto dma_xfer_continue;
		}

		ret = -EINVAL;
		goto dma_cont_failed;
	}

	ret = decode_message(qdev, user_msg, rsp, &resources, usr);

dma_cont_failed:
	free_dbc_buf(qdev, &resources);
	kfree(rsp);
	return ret;
}
int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_manage_msg *user_msg = data;
	struct qaic_device *qdev;
	struct manage_msg *msg;
	struct qaic_user *usr;
	u8 __user *user_data;
	int qdev_rcu_id;
	int usr_rcu_id;
	int ret;

	if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -EINVAL;

	usr = file_priv->driver_priv;

	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
		return -ENODEV;
	}

	qdev = usr->qddev->qdev;

	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
		srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
		return -ENODEV;
	}

	msg = kzalloc(QAIC_MANAGE_MAX_MSG_LENGTH + sizeof(*msg), GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto out;
	}

	msg->len = user_msg->len;
	msg->count = user_msg->count;

	user_data = u64_to_user_ptr(user_msg->data);

	if (copy_from_user(msg->data, user_data, user_msg->len)) {
		ret = -EFAULT;
		goto free_msg;
	}

	ret = qaic_manage(qdev, usr, msg);

	/*
	 * If qaic_manage() is successful then we copy the message onto
	 * userspace memory but we have an exception for -ECANCELED.
	 * For -ECANCELED, it means that device has NACKed the message with a
	 * status error code which userspace would like to know.
	 */
	if (ret == -ECANCELED || !ret) {
		if (copy_to_user(user_data, msg->data, msg->len)) {
			ret = -EFAULT;
		} else {
			user_msg->len = msg->len;
			user_msg->count = msg->count;
		}
	}

free_msg:
	kfree(msg);
out:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}
int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor)
{
	struct qaic_manage_trans_status_from_dev *status_result;
	struct qaic_manage_trans_status_to_dev *status_query;
	struct manage_msg *user_msg;
	int ret;

	user_msg = kmalloc(sizeof(*user_msg) + sizeof(*status_result), GFP_KERNEL);
	if (!user_msg) {
		ret = -ENOMEM;
		goto out;
	}
	user_msg->len = sizeof(*status_query);
	user_msg->count = 1;

	status_query = (struct qaic_manage_trans_status_to_dev *)user_msg->data;
	status_query->hdr.type = QAIC_TRANS_STATUS_FROM_USR;
	status_query->hdr.len = sizeof(status_query->hdr);

	ret = qaic_manage(qdev, usr, user_msg);
	if (ret)
		goto kfree_user_msg;

	status_result = (struct qaic_manage_trans_status_from_dev *)user_msg->data;
	*major = status_result->major;
	*minor = status_result->minor;

	if (status_result->status_flags & BIT(0)) { /* device is using CRC */
		/* By default qdev->gen_crc is programmed to generate CRC */
		qdev->valid_crc = valid_crc;
	} else {
		/* By default qdev->valid_crc is programmed to bypass CRC */
		qdev->gen_crc = gen_crc_stub;
	}

kfree_user_msg:
	kfree(user_msg);
out:
	return ret;
}
static void resp_worker(struct work_struct *work)
{
	struct resp_work *resp = container_of(work, struct resp_work, work);
	struct qaic_device *qdev = resp->qdev;
	struct wire_msg *msg = resp->buf;
	struct xfer_queue_elem *elem;
	struct xfer_queue_elem *i;
	bool found = false;

	mutex_lock(&qdev->cntl_mutex);
	list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
		if (elem->seq_num == le32_to_cpu(msg->hdr.sequence_number)) {
			found = true;
			list_del_init(&elem->list);
			elem->buf = msg;
			complete_all(&elem->xfer_done);
			break;
		}
	}
	mutex_unlock(&qdev->cntl_mutex);

	if (!found)
		/* request must have timed out, drop packet */
		kfree(msg);

	kfree(resp);
}
static void free_wrapper_from_list(struct wrapper_list *wrappers, struct wrapper_msg *wrapper)
{
	bool all_done = false;

	spin_lock(&wrappers->lock);
	kref_put(&wrapper->ref_count, free_wrapper);
	all_done = list_empty(&wrappers->list);
	spin_unlock(&wrappers->lock);

	if (all_done)
		kfree(wrappers);
}
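/*
 * MHI completion for an outbound (driver to device) control buffer: drop the
 * reference that msg_xfer() took on the wrapper that carried it.
 */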
void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
	struct wire_msg *msg = mhi_result->buf_addr;
	struct wrapper_msg *wrapper = container_of(msg, struct wrapper_msg, msg);

	free_wrapper_from_list(wrapper->head, wrapper);
}
void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
	struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
	struct wire_msg *msg = mhi_result->buf_addr;
	struct resp_work *resp;

	if (mhi_result->transaction_status || msg->hdr.magic_number != MANAGE_MAGIC_NUMBER) {
		kfree(msg);
		return;
	}

	resp = kmalloc(sizeof(*resp), GFP_ATOMIC);
	if (!resp) {
		kfree(msg);
		return;
	}

	INIT_WORK(&resp->work, resp_worker);
	resp->qdev = qdev;
	resp->buf = msg;
	queue_work(qdev->cntl_wq, &resp->work);
}
int qaic_control_open(struct qaic_device *qdev)
{
	if (!qdev->cntl_ch)
		return -ENODEV;

	qdev->cntl_lost_buf = false;
	/*
	 * By default qaic should assume that device has CRC enabled.
	 * Qaic comes to know if device has CRC enabled or disabled during the
	 * device status transaction, which is the first transaction performed
	 * on control channel.
	 *
	 * So CRC validation of first device status transaction response is
	 * ignored (by calling valid_crc_stub) and is done later during decoding
	 * if device has CRC enabled.
	 * Now that qaic knows whether device has CRC enabled or not, it acts
	 * accordingly.
	 */
	qdev->gen_crc = gen_crc;
	qdev->valid_crc = valid_crc_stub;

	return mhi_prepare_for_transfer(qdev->cntl_ch);
}
void qaic_control_close(struct qaic_device *qdev)
{
	mhi_unprepare_from_transfer(qdev->cntl_ch);
}
void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr)
{
	struct wire_trans_terminate_to_dev *trans;
	struct wrapper_list *wrappers;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	struct wire_msg *rsp;

	wrappers = alloc_wrapper_list();
	if (!wrappers)
		return;

	wrapper = add_wrapper(wrappers, sizeof(*wrapper) + sizeof(*msg) + sizeof(*trans));
	if (!wrapper)
		return;

	msg = &wrapper->msg;

	trans = (struct wire_trans_terminate_to_dev *)msg->data;

	trans->hdr.type = cpu_to_le32(QAIC_TRANS_TERMINATE_TO_DEV);
	trans->hdr.len = cpu_to_le32(sizeof(*trans));
	trans->handle = cpu_to_le32(usr->handle);

	mutex_lock(&qdev->cntl_mutex);
	wrapper->len = sizeof(msg->hdr) + sizeof(*trans);
	msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
	msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
	msg->hdr.len = cpu_to_le32(wrapper->len);
	msg->hdr.count = cpu_to_le32(1);
	msg->hdr.handle = cpu_to_le32(usr->handle);
	msg->hdr.padding = cpu_to_le32(0);
	msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));

	/*
	 * msg_xfer releases the mutex
	 * We don't care about the return of msg_xfer since we will not do
	 * anything different based on what happens.
	 * We ignore pending signals since one will be set if the user is
	 * killed, and we need to give the device a chance to cleanup, otherwise
	 * DMA may still be in progress when we return.
	 */
	rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, true);
	if (!IS_ERR(rsp))
		kfree(rsp);
	free_wrapper_from_list(wrappers, wrapper);
}
void wake_all_cntl(struct qaic_device *qdev)
{
	struct xfer_queue_elem *elem;
	struct xfer_queue_elem *i;

	mutex_lock(&qdev->cntl_mutex);
	list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
		list_del_init(&elem->list);
		complete_all(&elem->xfer_done);
	}
	mutex_unlock(&qdev->cntl_mutex);
}