// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 */

#include <linux/kthread.h>
#include <linux/socket.h>

#include "usbip_common.h"
#include "stub.h"
static void stub_free_priv_and_urb(struct stub_priv *priv)
{
        struct urb *urb = priv->urb;

        kfree(urb->setup_packet);
        urb->setup_packet = NULL;

        kfree(urb->transfer_buffer);
        urb->transfer_buffer = NULL;

        list_del(&priv->list);
        kmem_cache_free(stub_priv_cache, priv);
        usb_free_urb(urb);
}
/* must be called while holding spin_lock_irqsave(&sdev->priv_lock, flags) */
void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
                             __u32 status)
{
        struct stub_unlink *unlink;

        unlink = kzalloc(sizeof(struct stub_unlink), GFP_ATOMIC);
        if (!unlink) {
                usbip_event_add(&sdev->ud, VDEV_EVENT_ERROR_MALLOC);
                return;
        }

        unlink->seqnum = seqnum;
        unlink->status = status;

        list_add_tail(&unlink->list, &sdev->unlink_tx);
}
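
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * must hold sdev->priv_lock around stub_enqueue_ret_unlink(), as
 * stub_complete() below does:
 *
 *        spin_lock_irqsave(&sdev->priv_lock, flags);
 *        stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
 *        spin_unlock_irqrestore(&sdev->priv_lock, flags);
 */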
/**
 * stub_complete - completion handler of a usbip urb
 * @urb: pointer to the urb completed
 *
 * When a urb has completed, the USB core driver calls this function mostly in
 * the interrupt context. To return the result of a urb, the completed urb is
 * linked to the list of urbs pending return to the client.
 */
void stub_complete(struct urb *urb)
{
        struct stub_priv *priv = (struct stub_priv *) urb->context;
        struct stub_device *sdev = priv->sdev;
        unsigned long flags;

        usbip_dbg_stub_tx("complete! status %d\n", urb->status);

        switch (urb->status) {
        case 0:
                /* OK */
                break;
        case -ENOENT:
                dev_info(&urb->dev->dev,
                         "stopped by a call to usb_kill_urb() because of cleaning up a virtual connection\n");
                return;
        case -ECONNRESET:
                dev_info(&urb->dev->dev,
                         "unlinked by a call to usb_unlink_urb()\n");
                break;
        case -EPIPE:
                dev_info(&urb->dev->dev, "endpoint %d is stalled\n",
                         usb_pipeendpoint(urb->pipe));
                break;
        case -ESHUTDOWN:
                dev_info(&urb->dev->dev, "device removed?\n");
                break;
        default:
                dev_info(&urb->dev->dev,
                         "urb completion with non-zero status %d\n",
                         urb->status);
                break;
        }

        /* link a urb to the queue of tx. */
        spin_lock_irqsave(&sdev->priv_lock, flags);
        if (sdev->ud.tcp_socket == NULL) {
                usbip_dbg_stub_tx("ignore urb for closed connection\n");
                /* It will be freed in stub_device_cleanup_urbs(). */
        } else if (priv->unlinking) {
                stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
                stub_free_priv_and_urb(priv);
        } else {
                list_move_tail(&priv->list, &sdev->priv_tx);
        }
        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        /* wake up tx_thread */
        wake_up(&sdev->tx_waitq);
}
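
/*
 * Example (illustrative sketch): stub_complete() finds its per-request state
 * through urb->context, which the rx path sets up when it builds the urb.
 * Roughly (simplified; the actual submission path lives in stub_rx.c):
 *
 *        priv->urb->context  = (void *) priv;
 *        priv->urb->complete = stub_complete;
 *        usb_submit_urb(priv->urb, GFP_KERNEL);
 */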
static inline void setup_base_pdu(struct usbip_header_basic *base,
                                  __u32 command, __u32 seqnum)
{
        base->command   = command;
        base->seqnum    = seqnum;
        base->devid     = 0;
        base->ep        = 0;
        base->direction = 0;
}
static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urb *urb)
{
        struct stub_priv *priv = (struct stub_priv *) urb->context;

        setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, priv->seqnum);
        usbip_pack_pdu(rpdu, urb, USBIP_RET_SUBMIT, 1);
}
static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
                                 struct stub_unlink *unlink)
{
        setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
        rpdu->u.ret_unlink.status = unlink->status;
}
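
/*
 * For reference (an assumption based on usbip_common.h, which is not part of
 * this file): every reply starts with struct usbip_header_basic, i.e. the
 * five __u32 fields command, seqnum, devid, direction and ep, followed by a
 * command-specific union member such as ret_unlink. The header is converted
 * to network byte order by usbip_header_correct_endian() before sending.
 */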
static struct stub_priv *dequeue_from_priv_tx(struct stub_device *sdev)
{
        unsigned long flags;
        struct stub_priv *priv, *tmp;

        spin_lock_irqsave(&sdev->priv_lock, flags);

        list_for_each_entry_safe(priv, tmp, &sdev->priv_tx, list) {
                list_move_tail(&priv->list, &sdev->priv_free);
                spin_unlock_irqrestore(&sdev->priv_lock, flags);
                return priv;
        }

        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        return NULL;
}
static int stub_send_ret_submit(struct stub_device *sdev)
{
        unsigned long flags;
        struct stub_priv *priv, *tmp;

        struct msghdr msg;
        size_t txsize;

        size_t total_size = 0;

        while ((priv = dequeue_from_priv_tx(sdev)) != NULL) {
                int ret;
                struct urb *urb = priv->urb;
                struct usbip_header pdu_header;
                struct usbip_iso_packet_descriptor *iso_buffer = NULL;
                struct kvec *iov = NULL;
                int iovnum = 0;

                txsize = 0;
                memset(&pdu_header, 0, sizeof(pdu_header));
                memset(&msg, 0, sizeof(msg));

                if (urb->actual_length > 0 && !urb->transfer_buffer) {
                        dev_err(&sdev->udev->dev,
                                "urb: actual_length %d transfer_buffer null\n",
                                urb->actual_length);
                        return -1;
                }

                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
                        iovnum = 2 + urb->number_of_packets;
                else
                        iovnum = 2;

                iov = kcalloc(iovnum, sizeof(struct kvec), GFP_KERNEL);
                if (!iov) {
                        usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
                        return -1;
                }

                iovnum = 0;

                /* 1. setup usbip_header */
                setup_ret_submit_pdu(&pdu_header, urb);
                usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
                                  pdu_header.base.seqnum);
                usbip_header_correct_endian(&pdu_header, 1);

                iov[iovnum].iov_base = &pdu_header;
                iov[iovnum].iov_len  = sizeof(pdu_header);
                iovnum++;
                txsize += sizeof(pdu_header);

                /* 2. setup transfer buffer */
                if (usb_pipein(urb->pipe) &&
                    usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS &&
                    urb->actual_length > 0) {
                        iov[iovnum].iov_base = urb->transfer_buffer;
                        iov[iovnum].iov_len  = urb->actual_length;
                        iovnum++;
                        txsize += urb->actual_length;
                } else if (usb_pipein(urb->pipe) &&
                           usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                        /*
                         * For isochronous packets: actual_length is the sum
                         * of the actual lengths of the individual packets,
                         * but as the packet offsets are not changed there
                         * will be padding between the packets. To use the
                         * bandwidth optimally, the padding is not
                         * transmitted.
                         */
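                        /*
                         * Illustrative example (numbers are hypothetical,
                         * not from the driver): with two packets laid out
                         * at offsets 0 and 1024 whose actual lengths are
                         * 800 and 900, only 800 + 900 = 1700 payload bytes
                         * are sent, not the 1924 bytes up to the end of the
                         * second packet's data.
                         */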
                        int i;

                        for (i = 0; i < urb->number_of_packets; i++) {
                                iov[iovnum].iov_base = urb->transfer_buffer +
                                        urb->iso_frame_desc[i].offset;
                                iov[iovnum].iov_len =
                                        urb->iso_frame_desc[i].actual_length;
                                iovnum++;
                                txsize += urb->iso_frame_desc[i].actual_length;
                        }

                        if (txsize != sizeof(pdu_header) + urb->actual_length) {
                                dev_err(&sdev->udev->dev,
                                        "actual length of urb %d does not match iso packet sizes %zu\n",
                                        urb->actual_length,
                                        txsize - sizeof(pdu_header));
                                kfree(iov);
                                usbip_event_add(&sdev->ud,
                                                SDEV_EVENT_ERROR_TCP);
                                return -1;
                        }
                }

                /* 3. setup iso_packet_descriptor */
                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                        ssize_t len = 0;

                        iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
                        if (!iso_buffer) {
                                usbip_event_add(&sdev->ud,
                                                SDEV_EVENT_ERROR_MALLOC);
                                kfree(iov);
                                return -1;
                        }

                        iov[iovnum].iov_base = iso_buffer;
                        iov[iovnum].iov_len  = len;
                        txsize += len;
                        iovnum++;
                }

                ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
                                     iov, iovnum, txsize);
                if (ret != txsize) {
                        dev_err(&sdev->udev->dev,
                                "sendmsg failed!, retval %d for %zd\n",
                                ret, txsize);
                        kfree(iov);
                        kfree(iso_buffer);
                        usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
                        return -1;
                }

                kfree(iov);
                kfree(iso_buffer);

                total_size += txsize;
        }

        spin_lock_irqsave(&sdev->priv_lock, flags);

        list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) {
                stub_free_priv_and_urb(priv);
        }

        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        return total_size;
}
static struct stub_unlink *dequeue_from_unlink_tx(struct stub_device *sdev)
{
        unsigned long flags;
        struct stub_unlink *unlink, *tmp;

        spin_lock_irqsave(&sdev->priv_lock, flags);

        list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) {
                list_move_tail(&unlink->list, &sdev->unlink_free);
                spin_unlock_irqrestore(&sdev->priv_lock, flags);
                return unlink;
        }

        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        return NULL;
}
static int stub_send_ret_unlink(struct stub_device *sdev)
{
        unsigned long flags;
        struct stub_unlink *unlink, *tmp;

        struct msghdr msg;
        struct kvec iov[1];
        size_t txsize;

        size_t total_size = 0;

        while ((unlink = dequeue_from_unlink_tx(sdev)) != NULL) {
                int ret;
                struct usbip_header pdu_header;

                txsize = 0;
                memset(&pdu_header, 0, sizeof(pdu_header));
                memset(&msg, 0, sizeof(msg));
                memset(&iov, 0, sizeof(iov));

                usbip_dbg_stub_tx("setup ret unlink %lu\n", unlink->seqnum);

                /* 1. setup usbip_header */
                setup_ret_unlink_pdu(&pdu_header, unlink);
                usbip_header_correct_endian(&pdu_header, 1);

                iov[0].iov_base = &pdu_header;
                iov[0].iov_len  = sizeof(pdu_header);
                txsize += sizeof(pdu_header);

                ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov,
                                     1, txsize);
                if (ret != txsize) {
                        dev_err(&sdev->udev->dev,
                                "sendmsg failed!, retval %d for %zd\n",
                                ret, txsize);
                        usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
                        return -1;
                }

                usbip_dbg_stub_tx("send txdata\n");
                total_size += txsize;
        }

        spin_lock_irqsave(&sdev->priv_lock, flags);

        list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) {
                list_del(&unlink->list);
                kfree(unlink);
        }

        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        return total_size;
}
int stub_tx_loop(void *data)
{
        struct usbip_device *ud = data;
        struct stub_device *sdev = container_of(ud, struct stub_device, ud);

        while (!kthread_should_stop()) {
                if (usbip_event_happened(ud))
                        break;

                /*
                 * send_ret_submit comes earlier than send_ret_unlink. stub_rx
                 * looks only at the priv_init queue. If a URB completes before
                 * CMD_UNLINK is received, its priv is moved to the priv_tx
                 * queue and stub_rx does not find the target priv. In that
                 * case, vhci_rx receives the result of the submit request
                 * first and the result of the unlink request afterwards. The
                 * result of the submit is given back to the usbcore as the
                 * completion of the unlink request, and the unlink request
                 * itself is ignored. This is fine because a driver that calls
                 * usb_unlink_urb() learns that the unlink was too late from
                 * the status of the returned URB, which is the status set by
                 * usb_submit_urb().
                 */
                if (stub_send_ret_submit(sdev) < 0)
                        break;

                if (stub_send_ret_unlink(sdev) < 0)
                        break;

                wait_event_interruptible(sdev->tx_waitq,
                                         (!list_empty(&sdev->priv_tx) ||
                                          !list_empty(&sdev->unlink_tx) ||
                                          kthread_should_stop()));
        }

        return 0;
}
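
/*
 * Usage note (illustrative, based on how the usbip kthreads are started
 * elsewhere; the exact call site lives in stub_dev.c, not in this file):
 *
 *        sdev->ud.tcp_tx = kthread_get_run(stub_tx_loop, &sdev->ud,
 *                                          "stub_tx");
 *
 * The loop sleeps on sdev->tx_waitq and is woken by stub_complete() whenever
 * a completed urb or a pending unlink is queued for transmission.
 */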