drivers/usb/usbip/stub_rx.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 */

#include <asm/byteorder.h>
#include <linux/kthread.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "usbip_common.h"
#include "stub.h"
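
/*
 * Helpers that inspect the setup packet of a control URB and decide
 * whether it is a request the stub side must intercept (clear_halt,
 * set_interface, set_configuration, or a port reset).
 */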
static int is_clear_halt_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;

        req = (struct usb_ctrlrequest *) urb->setup_packet;

        return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
               (req->bRequestType == USB_RECIP_ENDPOINT) &&
               (req->wValue == USB_ENDPOINT_HALT);
}

static int is_set_interface_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;

        req = (struct usb_ctrlrequest *) urb->setup_packet;

        return (req->bRequest == USB_REQ_SET_INTERFACE) &&
               (req->bRequestType == USB_RECIP_INTERFACE);
}

static int is_set_configuration_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;

        req = (struct usb_ctrlrequest *) urb->setup_packet;

        return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
               (req->bRequestType == USB_RECIP_DEVICE);
}

static int is_reset_device_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;
        __u16 value;
        __u16 index;

        req = (struct usb_ctrlrequest *) urb->setup_packet;
        value = le16_to_cpu(req->wValue);
        index = le16_to_cpu(req->wIndex);

        if ((req->bRequest == USB_REQ_SET_FEATURE) &&
            (req->bRequestType == USB_RT_PORT) &&
            (value == USB_PORT_FEAT_RESET)) {
                usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
                return 1;
        } else
                return 0;
}
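
/*
 * Clear a halted endpoint on the exported device.  The endpoint number
 * and direction come from the wIndex field of the intercepted
 * CLEAR_FEATURE(ENDPOINT_HALT) request.
 */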
static int tweak_clear_halt_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;
        int target_endp;
        int target_dir;
        int target_pipe;
        int ret;

        req = (struct usb_ctrlrequest *) urb->setup_packet;

        /*
         * The stalled endpoint is specified in the wIndex value. The endpoint
         * of the urb is the target of this clear_halt request (i.e., control
         * endpoint).
         */
        target_endp = le16_to_cpu(req->wIndex) & 0x000f;

        /* Is the stalled endpoint direction IN or OUT? USB_DIR_IN is 0x80. */
        target_dir = le16_to_cpu(req->wIndex) & 0x0080;

        if (target_dir)
                target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
        else
                target_pipe = usb_sndctrlpipe(urb->dev, target_endp);

        ret = usb_clear_halt(urb->dev, target_pipe);
        if (ret < 0)
                dev_err(&urb->dev->dev,
                        "usb_clear_halt error: devnum %d endp %d ret %d\n",
                        urb->dev->devnum, target_endp, ret);
        else
                dev_info(&urb->dev->dev,
                         "usb_clear_halt done: devnum %d endp %d\n",
                         urb->dev->devnum, target_endp);

        return ret;
}
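
/*
 * Select an alternate setting on the exported device with
 * usb_set_interface(); the interface and alternate numbers are taken
 * from the intercepted SET_INTERFACE request.
 */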
static int tweak_set_interface_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;
        __u16 alternate;
        __u16 interface;
        int ret;

        req = (struct usb_ctrlrequest *) urb->setup_packet;
        alternate = le16_to_cpu(req->wValue);
        interface = le16_to_cpu(req->wIndex);

        usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
                          interface, alternate);

        ret = usb_set_interface(urb->dev, interface, alternate);
        if (ret < 0)
                dev_err(&urb->dev->dev,
                        "usb_set_interface error: inf %u alt %u ret %d\n",
                        interface, alternate, ret);
        else
                dev_info(&urb->dev->dev,
                         "usb_set_interface done: inf %u alt %u\n",
                         interface, alternate);

        return ret;
}
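
/*
 * Apply a SET_CONFIGURATION request locally via usb_set_configuration().
 * Errors other than -ENODEV are only logged; the function always
 * returns 0.
 */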
static int tweak_set_configuration_cmd(struct urb *urb)
{
        struct stub_priv *priv = (struct stub_priv *) urb->context;
        struct stub_device *sdev = priv->sdev;
        struct usb_ctrlrequest *req;
        __u16 config;
        int err;

        req = (struct usb_ctrlrequest *) urb->setup_packet;
        config = le16_to_cpu(req->wValue);

        err = usb_set_configuration(sdev->udev, config);
        if (err && err != -ENODEV)
                dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
                        config, err);
        return 0;
}
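
/*
 * Reset the exported device in response to an intercepted port-reset
 * request.  The device lock must be obtained with
 * usb_lock_device_for_reset() before calling usb_reset_device().
 */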
static int tweak_reset_device_cmd(struct urb *urb)
{
        struct stub_priv *priv = (struct stub_priv *) urb->context;
        struct stub_device *sdev = priv->sdev;

        dev_info(&urb->dev->dev, "usb_queue_reset_device\n");

        if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
                dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
                return 0;
        }
        usb_reset_device(sdev->udev);
        usb_unlock_device(sdev->udev);

        return 0;
}

/*
 * clear_halt, set_interface, set_configuration and reset_device
 * require special handling.
 */
static void tweak_special_requests(struct urb *urb)
{
        if (!urb || !urb->setup_packet)
                return;

        if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
                return;

        if (is_clear_halt_cmd(urb))
                /* tweak clear_halt */
                tweak_clear_halt_cmd(urb);

        else if (is_set_interface_cmd(urb))
                /* tweak set_interface */
                tweak_set_interface_cmd(urb);

        else if (is_set_configuration_cmd(urb))
                /* tweak set_configuration */
                tweak_set_configuration_cmd(urb);

        else if (is_reset_device_cmd(urb))
                tweak_reset_device_cmd(urb);
        else
                usbip_dbg_stub_rx("no need to tweak\n");
}

/*
 * stub_recv_cmd_unlink() unlinks the URB by a call to usb_unlink_urb().
 * By unlinking the urb asynchronously, stub_rx can continuously
 * process incoming urbs.  Even if the urb is unlinked, its completion
 * handler will be called and stub_tx will send a return pdu.
 *
 * See also comments about the unlinking strategy in vhci_hcd.c.
 */
static int stub_recv_cmd_unlink(struct stub_device *sdev,
                                struct usbip_header *pdu)
{
        int ret;
        unsigned long flags;
        struct stub_priv *priv;

        spin_lock_irqsave(&sdev->priv_lock, flags);

        list_for_each_entry(priv, &sdev->priv_init, list) {
                if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
                        continue;

                /*
                 * This matched urb is not completed yet (i.e., it is still
                 * in flight in the usb hcd hardware/driver). Now we are
                 * cancelling it. The unlinking flag means that we are
                 * now not going to return the normal result pdu of a
                 * submission request, but going to return a result pdu
                 * of the unlink request.
                 */
                priv->unlinking = 1;

                /*
                 * When the unlinking flag is set, priv->seqnum is changed
                 * from the seqnum of the cancelled urb to the seqnum of
                 * the unlink request. This will be used to build the
                 * result pdu of the unlink request.
                 */
                priv->seqnum = pdu->base.seqnum;

                spin_unlock_irqrestore(&sdev->priv_lock, flags);

                /*
                 * usb_unlink_urb() is called outside the spinlock to avoid
                 * spinlock recursion, since stub_complete() is sometimes
                 * called in this context rather than in interrupt context.
                 * If stub_complete() runs before we call usb_unlink_urb(),
                 * usb_unlink_urb() returns an error value; in that case
                 * stub_tx still returns the result pdu of this unlink
                 * request even though the submission completed and no
                 * actual unlinking was performed.
                 */
                /*
                 * In that case, urb->status is not -ECONNRESET, so a driver
                 * on the client host can recognize that the unlink request
                 * failed.
                 */
                ret = usb_unlink_urb(priv->urb);
                if (ret != -EINPROGRESS)
                        dev_err(&priv->urb->dev->dev,
                                "failed to unlink a urb # %lu, ret %d\n",
                                priv->seqnum, ret);

                return 0;
        }

        usbip_dbg_stub_rx("seqnum %d is not pending\n",
                          pdu->u.cmd_unlink.seqnum);

        /*
         * The urb of the unlink target is not found in the priv_init queue.
         * It has already completed and its result is (or was) going to be
         * sent by a CMD_RET pdu. In this case, usb_unlink_urb() is not
         * needed. We only return the completion of this unlink request to
         * vhci_hcd.
         */
        stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);

        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        return 0;
}
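
/*
 * Check that the PDU is addressed to this exported device and that the
 * device is currently in the SDEV_ST_USED state.
 */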
static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
{
        struct usbip_device *ud = &sdev->ud;
        int valid = 0;

        if (pdu->base.devid == sdev->devid) {
                spin_lock_irq(&ud->lock);
                if (ud->status == SDEV_ST_USED) {
                        /* A request is valid. */
                        valid = 1;
                }
                spin_unlock_irq(&ud->lock);
        }

        return valid;
}
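
/*
 * Allocate a stub_priv for an incoming CMD_SUBMIT and queue it on
 * sdev->priv_init so the error handler can later free it.  Returns NULL
 * and raises SDEV_EVENT_ERROR_MALLOC on allocation failure.
 */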
static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
                                         struct usbip_header *pdu)
{
        struct stub_priv *priv;
        struct usbip_device *ud = &sdev->ud;
        unsigned long flags;

        spin_lock_irqsave(&sdev->priv_lock, flags);

        priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
        if (!priv) {
                dev_err(&sdev->udev->dev, "alloc stub_priv\n");
                spin_unlock_irqrestore(&sdev->priv_lock, flags);
                usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
                return NULL;
        }

        priv->seqnum = pdu->base.seqnum;
        priv->sdev = sdev;

        /*
         * After a stub_priv is linked to a list_head,
         * our error handler can free allocated data.
         */
        list_add_tail(&priv->list, &sdev->priv_init);

        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        return priv;
}
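
/*
 * Map the endpoint number and direction in the PDU header to a pipe
 * value for the matching transfer type (control, bulk, interrupt or
 * isochronous).  Returns -1 for an invalid endpoint or packet count.
 */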
static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
{
        struct usb_device *udev = sdev->udev;
        struct usb_host_endpoint *ep;
        struct usb_endpoint_descriptor *epd = NULL;
        int epnum = pdu->base.ep;
        int dir = pdu->base.direction;

        if (epnum < 0 || epnum > 15)
                goto err_ret;

        if (dir == USBIP_DIR_IN)
                ep = udev->ep_in[epnum & 0x7f];
        else
                ep = udev->ep_out[epnum & 0x7f];
        if (!ep)
                goto err_ret;

        epd = &ep->desc;

        if (usb_endpoint_xfer_control(epd)) {
                if (dir == USBIP_DIR_OUT)
                        return usb_sndctrlpipe(udev, epnum);
                else
                        return usb_rcvctrlpipe(udev, epnum);
        }

        if (usb_endpoint_xfer_bulk(epd)) {
                if (dir == USBIP_DIR_OUT)
                        return usb_sndbulkpipe(udev, epnum);
                else
                        return usb_rcvbulkpipe(udev, epnum);
        }

        if (usb_endpoint_xfer_int(epd)) {
                if (dir == USBIP_DIR_OUT)
                        return usb_sndintpipe(udev, epnum);
                else
                        return usb_rcvintpipe(udev, epnum);
        }

        if (usb_endpoint_xfer_isoc(epd)) {
                /* validate number of packets */
                if (pdu->u.cmd_submit.number_of_packets < 0 ||
                    pdu->u.cmd_submit.number_of_packets >
                    USBIP_MAX_ISO_PACKETS) {
                        dev_err(&sdev->udev->dev,
                                "CMD_SUBMIT: isoc invalid num packets %d\n",
                                pdu->u.cmd_submit.number_of_packets);
                        return -1;
                }
                if (dir == USBIP_DIR_OUT)
                        return usb_sndisocpipe(udev, epnum);
                else
                        return usb_rcvisocpipe(udev, epnum);
        }

err_ret:
        /* reached only for an invalid endpoint or unknown transfer type */
        dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
        return -1;
}
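
/*
 * Drop URB transfer flags that the client side may not set, so that
 * only a simple/standard set of flags reaches the host controller
 * driver.
 */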
static void masking_bogus_flags(struct urb *urb)
{
        int xfertype;
        struct usb_device *dev;
        struct usb_host_endpoint *ep;
        int is_out;
        unsigned int allowed;

        if (!urb || urb->hcpriv || !urb->complete)
                return;
        dev = urb->dev;
        if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
                return;

        ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
                [usb_pipeendpoint(urb->pipe)];
        if (!ep)
                return;

        xfertype = usb_endpoint_type(&ep->desc);
        if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
                struct usb_ctrlrequest *setup =
                        (struct usb_ctrlrequest *) urb->setup_packet;

                if (!setup)
                        return;
                is_out = !(setup->bRequestType & USB_DIR_IN) ||
                        !setup->wLength;
        } else {
                is_out = usb_endpoint_dir_out(&ep->desc);
        }

        /* enforce simple/standard policy */
        allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
                   URB_DIR_MASK | URB_FREE_BUFFER);
        switch (xfertype) {
        case USB_ENDPOINT_XFER_BULK:
                if (is_out)
                        allowed |= URB_ZERO_PACKET;
                /* FALLTHROUGH */
        default:        /* all non-iso endpoints */
                if (!is_out)
                        allowed |= URB_SHORT_NOT_OK;
                break;
        case USB_ENDPOINT_XFER_ISOC:
                allowed |= URB_ISO_ASAP;
                break;
        }
        urb->transfer_flags &= allowed;
}
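
/*
 * Handle a CMD_SUBMIT pdu: allocate a stub_priv and a urb, copy the
 * setup packet and transfer buffer from the pdu, apply the special
 * request tweaks, then submit the urb to the exported device.
 */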
static void stub_recv_cmd_submit(struct stub_device *sdev,
                                 struct usbip_header *pdu)
{
        int ret;
        struct stub_priv *priv;
        struct usbip_device *ud = &sdev->ud;
        struct usb_device *udev = sdev->udev;
        int pipe = get_pipe(sdev, pdu);

        if (pipe == -1)
                return;

        priv = stub_priv_alloc(sdev, pdu);
        if (!priv)
                return;

        /* setup a urb */
        if (usb_pipeisoc(pipe))
                priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
                                          GFP_KERNEL);
        else
                priv->urb = usb_alloc_urb(0, GFP_KERNEL);

        if (!priv->urb) {
                usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
                return;
        }

        /* allocate urb transfer buffer, if needed */
        if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
                priv->urb->transfer_buffer =
                        kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
                                GFP_KERNEL);
                if (!priv->urb->transfer_buffer) {
                        usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
                        return;
                }
        }

        /* copy urb setup packet */
        priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
                                          GFP_KERNEL);
        if (!priv->urb->setup_packet) {
                dev_err(&udev->dev, "allocate setup_packet\n");
                usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
                return;
        }

        /* set other members from the base header of pdu */
        priv->urb->context = (void *) priv;
        priv->urb->dev = udev;
        priv->urb->pipe = pipe;
        priv->urb->complete = stub_complete;

        usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0);

        if (usbip_recv_xbuff(ud, priv->urb) < 0)
                return;

        if (usbip_recv_iso(ud, priv->urb) < 0)
                return;

        /*
         * An intercepted request need not be submitted again, though
         * doing so appears harmless.
         */
        tweak_special_requests(priv->urb);

        masking_bogus_flags(priv->urb);
        /* urb is now ready to submit */
        ret = usb_submit_urb(priv->urb, GFP_KERNEL);

        if (ret == 0)
                usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
                                  pdu->base.seqnum);
        else {
                dev_err(&udev->dev, "submit_urb error, %d\n", ret);
                usbip_dump_header(pdu);
                usbip_dump_urb(priv->urb);

                /*
                 * Pessimistic.
                 * This connection will be discarded.
                 */
                usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
        }

        usbip_dbg_stub_rx("Leave\n");
}

/* Receive one pdu from the tcp socket and dispatch it. */
static void stub_rx_pdu(struct usbip_device *ud)
{
        int ret;
        struct usbip_header pdu;
        struct stub_device *sdev = container_of(ud, struct stub_device, ud);
        struct device *dev = &sdev->udev->dev;

        usbip_dbg_stub_rx("Enter\n");

        memset(&pdu, 0, sizeof(pdu));

        /* receive a pdu header */
        ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
        if (ret != sizeof(pdu)) {
                dev_err(dev, "recv a header, %d\n", ret);
                usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
                return;
        }

        usbip_header_correct_endian(&pdu, 0);

        if (usbip_dbg_flag_stub_rx)
                usbip_dump_header(&pdu);

        if (!valid_request(sdev, &pdu)) {
                dev_err(dev, "recv invalid request\n");
                usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
                return;
        }

        switch (pdu.base.command) {
        case USBIP_CMD_UNLINK:
                stub_recv_cmd_unlink(sdev, &pdu);
                break;

        case USBIP_CMD_SUBMIT:
                stub_recv_cmd_submit(sdev, &pdu);
                break;

        default:
                /* unknown command from the client; treat as a connection error */
                dev_err(dev, "unknown pdu\n");
                usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
                break;
        }
}
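
/*
 * Receive loop run by the stub_rx kernel thread: keep reading pdus
 * until the thread is told to stop or a usbip event has been raised.
 */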
int stub_rx_loop(void *data)
{
        struct usbip_device *ud = data;

        while (!kthread_should_stop()) {
                if (usbip_event_happened(ud))
                        break;

                stub_rx_pdu(ud);
        }

        return 0;
}