// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
 * Copyright (C) 2015-2016 Samsung Electronics
 *               Igor Kotrasinski <i.kotrasinsk@samsung.com>
 */

#include <net/sock.h>
#include <linux/list.h>
#include <linux/kthread.h>

#include "usbip_common.h"
#include "vudc.h"
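/*
 * TX side of the virtual UDC (vudc): a dedicated kthread (v_tx_loop) drains
 * udc->tx_queue and answers the peer over udc->ud.tcp_socket with either
 * USBIP_RET_SUBMIT (completed URB) or USBIP_RET_UNLINK (unlinked URB) PDUs.
 */
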
static inline void setup_base_pdu(struct usbip_header_basic *base,
				  __u32 command, __u32 seqnum)
{
	base->command	= command;
	base->seqnum	= seqnum;
	base->devid	= 0;
	base->ep	= 0;
	base->direction	= 0;
}

static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urbp *urb_p)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, urb_p->seqnum);
	usbip_pack_pdu(rpdu, urb_p->urb, USBIP_RET_SUBMIT, 1);
}

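/* Build the common header and copy the unlink completion status. */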
static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
				 struct v_unlink *unlink)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
	rpdu->u.ret_unlink.status = unlink->status;
}

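/*
 * Send a single RET_UNLINK PDU for @unlink.  On success the v_unlink is
 * freed here; on a short or failed send a TCP error event is raised.
 */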
static int v_send_ret_unlink(struct vudc *udc, struct v_unlink *unlink)
{
	struct msghdr msg;
	struct kvec iov[1];
	size_t txsize;
	int ret;
	struct usbip_header pdu_header;

	txsize = 0;
	memset(&pdu_header, 0, sizeof(pdu_header));
	memset(&msg, 0, sizeof(msg));
	memset(&iov, 0, sizeof(iov));

	/* 1. setup usbip_header */
	setup_ret_unlink_pdu(&pdu_header, unlink);
	usbip_header_correct_endian(&pdu_header, 1);

	iov[0].iov_base = &pdu_header;
	iov[0].iov_len  = sizeof(pdu_header);
	txsize += sizeof(pdu_header);

	ret = kernel_sendmsg(udc->ud.tcp_socket, &msg, iov,
			     1, txsize);
	if (ret != txsize) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
		if (ret >= 0)
			return -EPIPE;
		return ret;
	}
	kfree(unlink);

	return txsize;
}

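/*
 * Send a RET_SUBMIT PDU for a completed URB.  The reply is gathered into an
 * iovec of up to three parts: the usbip header, the IN transfer buffer (one
 * entry per packet for isochronous transfers), and, for isochronous URBs,
 * the packed iso_packet_descriptor array.  The urbp and its URB are always
 * consumed, whether the send succeeds or fails.
 */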
static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
	struct urb *urb = urb_p->urb;
	struct usbip_header pdu_header;
	struct usbip_iso_packet_descriptor *iso_buffer = NULL;
	struct kvec *iov = NULL;
	int iovnum = 0;
	int ret = 0;
	size_t txsize;
	struct msghdr msg;

	txsize = 0;
	memset(&pdu_header, 0, sizeof(pdu_header));
	memset(&msg, 0, sizeof(msg));

	if (urb->actual_length > 0 && !urb->transfer_buffer) {
		dev_err(&udc->gadget.dev,
			"urb: actual_length %d transfer_buffer null\n",
			urb->actual_length);
		return -1;
	}

	if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
		iovnum = 2 + urb->number_of_packets;
	else
		iovnum = 2;

	iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
		ret = -ENOMEM;
		goto out;
	}
	iovnum = 0;

	/* 1. setup usbip_header */
	setup_ret_submit_pdu(&pdu_header, urb_p);
	usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
			  pdu_header.base.seqnum);
	usbip_header_correct_endian(&pdu_header, 1);

	iov[iovnum].iov_base = &pdu_header;
	iov[iovnum].iov_len  = sizeof(pdu_header);
	iovnum++;
	txsize += sizeof(pdu_header);

	/* 2. setup transfer buffer */
	if (urb_p->type != USB_ENDPOINT_XFER_ISOC &&
	    usb_pipein(urb->pipe) && urb->actual_length > 0) {
		iov[iovnum].iov_base = urb->transfer_buffer;
		iov[iovnum].iov_len  = urb->actual_length;
		iovnum++;
		txsize += urb->actual_length;
	} else if (urb_p->type == USB_ENDPOINT_XFER_ISOC &&
		usb_pipein(urb->pipe)) {
		/* FIXME - copypasted from stub_tx, refactor */
		int i;

		for (i = 0; i < urb->number_of_packets; i++) {
			iov[iovnum].iov_base = urb->transfer_buffer +
				urb->iso_frame_desc[i].offset;
			iov[iovnum].iov_len =
				urb->iso_frame_desc[i].actual_length;
			iovnum++;
			txsize += urb->iso_frame_desc[i].actual_length;
		}

		if (txsize != sizeof(pdu_header) + urb->actual_length) {
			usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
			ret = -EPIPE;
			goto out;
		}
	}
	/* else - no buffer to send */

	/* 3. setup iso_packet_descriptor */
	if (urb_p->type == USB_ENDPOINT_XFER_ISOC) {
		ssize_t len = 0;

		iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
		if (!iso_buffer) {
			usbip_event_add(&udc->ud,
					VUDC_EVENT_ERROR_MALLOC);
			ret = -ENOMEM;
			goto out;
		}
		iov[iovnum].iov_base = iso_buffer;
		iov[iovnum].iov_len  = len;
		txsize += len;
		iovnum++;
	}

	ret = kernel_sendmsg(udc->ud.tcp_socket, &msg,
			     iov, iovnum, txsize);
	if (ret != txsize) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
		if (ret >= 0)
			ret = -EPIPE;
		goto out;
	}

out:
	kfree(iov);
	kfree(iso_buffer);
	free_urbp_and_urb(urb_p);
	if (ret < 0)
		return ret;
	return txsize;
}

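/*
 * Drain the TX queue.  The lock is dropped around the actual sends, as both
 * send paths allocate with GFP_KERNEL and may block on the socket.
 */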
static int v_send_ret(struct vudc *udc)
{
	unsigned long flags;
	struct tx_item *txi;
	size_t total_size = 0;
	int ret = 0;

	spin_lock_irqsave(&udc->lock_tx, flags);
	while (!list_empty(&udc->tx_queue)) {
		txi = list_first_entry(&udc->tx_queue, struct tx_item,
				       tx_entry);
		list_del(&txi->tx_entry);
		spin_unlock_irqrestore(&udc->lock_tx, flags);

		switch (txi->type) {
		case TX_SUBMIT:
			ret = v_send_ret_submit(udc, txi->s);
			break;
		case TX_UNLINK:
			ret = v_send_ret_unlink(udc, txi->u);
			break;
		}
		kfree(txi);

		if (ret < 0)
			return ret;

		total_size += ret;

		spin_lock_irqsave(&udc->lock_tx, flags);
	}

	spin_unlock_irqrestore(&udc->lock_tx, flags);
	return total_size;
}

int v_tx_loop(void *data)
{
	struct usbip_device *ud = (struct usbip_device *) data;
	struct vudc *udc = container_of(ud, struct vudc, ud);
	int ret = 0;

	while (!kthread_should_stop()) {
		if (usbip_event_happened(&udc->ud))
			break;
		ret = v_send_ret(udc);
		if (ret < 0) {
			pr_warn("v_tx exit with error %d", ret);
			break;
		}
		wait_event_interruptible(udc->tx_waitq,
					 (!list_empty(&udc->tx_queue) ||
					 kthread_should_stop()));
	}

	return 0;
}

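/*
 * Queue a RET_UNLINK reply for the TX thread.  On allocation failure a
 * VDEV_EVENT_ERROR_MALLOC event is raised and the reply is dropped.
 */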
/* called with spinlocks held */
void v_enqueue_ret_unlink(struct vudc *udc, __u32 seqnum, __u32 status)
{
	struct tx_item *txi;
	struct v_unlink *unlink;

	txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
	if (!txi) {
		usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
		return;
	}
	unlink = kzalloc(sizeof(*unlink), GFP_ATOMIC);
	if (!unlink) {
		kfree(txi);
		usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
		return;
	}

	unlink->seqnum = seqnum;
	unlink->status = status;
	txi->type = TX_UNLINK;
	txi->u = unlink;

	list_add_tail(&txi->tx_entry, &udc->tx_queue);
}

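/*
 * Queue a RET_SUBMIT reply for a completed urbp.  On allocation failure a
 * VDEV_EVENT_ERROR_MALLOC event is raised and the reply is dropped.
 */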
/* called with spinlocks held */
void v_enqueue_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
	struct tx_item *txi;

	txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
	if (!txi) {
		usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
		return;
	}

	txi->type = TX_SUBMIT;
	txi->s = urb_p;

	list_add_tail(&txi->tx_entry, &udc->tx_queue);
}