/*
 * USB Skeleton driver - 2.2
 *
 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
 * but has been rewritten to be easier to read and use.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>
/* Define these values to match your devices */
#define USB_SKEL_VENDOR_ID	0xfff0
#define USB_SKEL_PRODUCT_ID	0xfff0

/* table of devices that work with this driver */
static const struct usb_device_id skel_table[] = {
	{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
	{ }					/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, skel_table);
/* Get a minor range for your devices from the usb maintainer */
#define USB_SKEL_MINOR_BASE	192

/* our private defines. if this grows any larger, use your own .h file */
#define MAX_TRANSFER		(PAGE_SIZE - 512)
/*
 * MAX_TRANSFER is chosen so that the VM is not stressed by
 * allocations > PAGE_SIZE and the number of packets in a page
 * is an integer; 512 is the largest possible packet on EHCI.
 */
#define WRITES_IN_FLIGHT	8
/* arbitrarily chosen */
/* Structure to hold all of our device specific stuff */
struct usb_skel {
	struct usb_device	*udev;			/* the usb device for this device */
	struct usb_interface	*interface;		/* the interface for this device */
	struct semaphore	limit_sem;		/* limiting the number of writes in progress */
	struct usb_anchor	submitted;		/* in case we need to retract our submissions */
	struct urb		*bulk_in_urb;		/* the urb to read data with */
	unsigned char		*bulk_in_buffer;	/* the buffer to receive data */
	size_t			bulk_in_size;		/* the size of the receive buffer */
	size_t			bulk_in_filled;		/* number of bytes in the buffer */
	size_t			bulk_in_copied;		/* already copied to user space */
	__u8			bulk_in_endpointAddr;	/* the address of the bulk in endpoint */
	__u8			bulk_out_endpointAddr;	/* the address of the bulk out endpoint */
	int			errors;			/* the last request tanked */
	bool			ongoing_read;		/* a read is going on */
	spinlock_t		err_lock;		/* lock for errors */
	struct kref		kref;
	struct mutex		io_mutex;		/* synchronize I/O with disconnect */
	wait_queue_head_t	bulk_in_wait;		/* to wait for an ongoing read */
};
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)
static struct usb_driver skel_driver;
static void skel_draw_down(struct usb_skel *dev);
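/*
 * kref release callback: runs once the last reference to the device state
 * is dropped via kref_put(); it frees the read URB, drops our reference on
 * the struct usb_device and releases the per-device buffers and state.
 */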
static void skel_delete(struct kref *kref)
{
	struct usb_skel *dev = to_skel_dev(kref);

	usb_free_urb(dev->bulk_in_urb);
	usb_put_dev(dev->udev);
	kfree(dev->bulk_in_buffer);
	kfree(dev);
}
static int skel_open(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;
	struct usb_interface *interface;
	int subminor;
	int retval = 0;

	subminor = iminor(inode);

	interface = usb_find_interface(&skel_driver, subminor);
	if (!interface) {
		pr_err("%s - error, can't find device for minor %d\n",
			__func__, subminor);
		retval = -ENODEV;
		goto exit;
	}

	dev = usb_get_intfdata(interface);
	if (!dev) {
		retval = -ENODEV;
		goto exit;
	}

	retval = usb_autopm_get_interface(interface);
	if (retval)
		goto exit;

	/* increment our usage count for the device */
	kref_get(&dev->kref);

	/* save our object in the file's private structure */
	file->private_data = dev;

exit:
	return retval;
}
static int skel_release(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* allow the device to be autosuspended */
	mutex_lock(&dev->io_mutex);
	if (dev->interface)
		usb_autopm_put_interface(dev->interface);
	mutex_unlock(&dev->io_mutex);

	/* decrement the count on our device */
	kref_put(&dev->kref, skel_delete);
	return 0;
}
static int skel_flush(struct file *file, fl_owner_t id)
{
	struct usb_skel *dev;
	int res;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* wait for io to stop */
	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	/* read out errors, leave subsequent opens a clean slate */
	spin_lock_irq(&dev->err_lock);
	res = dev->errors ? (dev->errors == -EPIPE ? -EPIPE : -EIO) : 0;
	dev->errors = 0;
	spin_unlock_irq(&dev->err_lock);

	mutex_unlock(&dev->io_mutex);

	return res;
}
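/*
 * Completion handler for the bulk-in URB.  It may run in interrupt
 * context, which is why it takes err_lock with plain spin_lock() while
 * process-context users take spin_lock_irq(); it records the URB status
 * or the number of bytes received, clears ongoing_read and wakes any
 * reader sleeping on bulk_in_wait.
 */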
static void skel_read_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

	spin_lock(&dev->err_lock);
	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero read bulk status received: %d\n",
				__func__, urb->status);

		dev->errors = urb->status;
	} else {
		dev->bulk_in_filled = urb->actual_length;
	}
	dev->ongoing_read = 0;
	spin_unlock(&dev->err_lock);

	wake_up_interruptible(&dev->bulk_in_wait);
}
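/*
 * Start an asynchronous bulk-in transfer.  Called with io_mutex held from
 * skel_read(); it resets the buffer bookkeeping, marks a read as ongoing
 * and submits bulk_in_urb, whose completion is handled above.
 */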
static int skel_do_read_io(struct usb_skel *dev, size_t count)
{
	int rv;

	/* prepare a read */
	usb_fill_bulk_urb(dev->bulk_in_urb,
			dev->udev,
			usb_rcvbulkpipe(dev->udev,
				dev->bulk_in_endpointAddr),
			dev->bulk_in_buffer,
			min(dev->bulk_in_size, count),
			skel_read_bulk_callback,
			dev);
	/* tell everybody to leave the URB alone */
	spin_lock_irq(&dev->err_lock);
	dev->ongoing_read = 1;
	spin_unlock_irq(&dev->err_lock);

	/* submit bulk in urb, which means no data to deliver */
	dev->bulk_in_filled = 0;
	dev->bulk_in_copied = 0;

	/* do it */
	rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
	if (rv < 0) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting read urb, error %d\n",
			__func__, rv);
		rv = (rv == -ENOMEM) ? rv : -EIO;
		spin_lock_irq(&dev->err_lock);
		dev->ongoing_read = 0;
		spin_unlock_irq(&dev->err_lock);
	}

	return rv;
}
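/*
 * read() state machine: under io_mutex we either wait for an in-flight
 * read to finish (or bail out with -EAGAIN for O_NONBLOCK), report a
 * deferred error, copy out data already sitting in bulk_in_buffer, or
 * kick off new I/O with skel_do_read_io() and retry.
 */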
static ssize_t skel_read(struct file *file, char *buffer, size_t count,
			 loff_t *ppos)
{
	struct usb_skel *dev;
	int rv;
	bool ongoing_io;

	dev = file->private_data;

	/* if we cannot read at all, return EOF */
	if (!dev->bulk_in_urb || !count)
		return 0;

	/* no concurrent readers */
	rv = mutex_lock_interruptible(&dev->io_mutex);
	if (rv < 0)
		return rv;

	if (!dev->interface) {		/* disconnect() was called */
		rv = -ENODEV;
		goto exit;
	}

	/* if IO is under way, we must not touch things */
retry:
	spin_lock_irq(&dev->err_lock);
	ongoing_io = dev->ongoing_read;
	spin_unlock_irq(&dev->err_lock);

	if (ongoing_io) {
		/* nonblocking IO shall not wait */
		if (file->f_flags & O_NONBLOCK) {
			rv = -EAGAIN;
			goto exit;
		}
		/*
		 * IO may take forever
		 * hence wait in an interruptible state
		 */
		rv = wait_event_interruptible(dev->bulk_in_wait, (!dev->ongoing_read));
		if (rv < 0)
			goto exit;
	}

	/* errors must be reported */
	rv = dev->errors;
	if (rv < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		rv = (rv == -EPIPE) ? rv : -EIO;
		/* report it */
		goto exit;
	}

	/*
	 * if the buffer is filled we may satisfy the read
	 * else we need to start IO
	 */

	if (dev->bulk_in_filled) {
		/* we had read data */
		size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
		size_t chunk = min(available, count);

		if (!available) {
			/*
			 * all data has been used
			 * actual IO needs to be done
			 */
			rv = skel_do_read_io(dev, count);
			if (rv < 0)
				goto exit;
			else
				goto retry;
		}
		/*
		 * data is available
		 * chunk tells us how much shall be copied
		 */

		if (copy_to_user(buffer,
				 dev->bulk_in_buffer + dev->bulk_in_copied,
				 chunk))
			rv = -EFAULT;
		else
			rv = chunk;

		dev->bulk_in_copied += chunk;

		/*
		 * if we are asked for more than we have,
		 * we start IO but don't wait
		 */
		if (available < count)
			skel_do_read_io(dev, count - chunk);
	} else {
		/* no data in the buffer */
		rv = skel_do_read_io(dev, count);
		if (rv < 0)
			goto exit;
		else
			goto retry;
	}
exit:
	mutex_unlock(&dev->io_mutex);
	return rv;
}
static void skel_write_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);

		spin_lock(&dev->err_lock);
		dev->errors = urb->status;
		spin_unlock(&dev->err_lock);
	}

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);
	up(&dev->limit_sem);
}
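/*
 * write() path: user data is copied into a DMA-coherent buffer and sent
 * with a freshly allocated, anchored URB; the completion callback above
 * frees the buffer and releases a limit_sem slot, so at most
 * WRITES_IN_FLIGHT writes can be outstanding at any time.
 */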
static ssize_t skel_write(struct file *file, const char *user_buffer,
			  size_t count, loff_t *ppos)
{
	struct usb_skel *dev;
	int retval = 0;
	struct urb *urb = NULL;
	char *buf = NULL;
	size_t writesize = min(count, (size_t)MAX_TRANSFER);

	dev = file->private_data;

	/* verify that we actually have some data to write */
	if (count == 0)
		goto exit;

	/*
	 * limit the number of URBs in flight to stop a user from using up all
	 * RAM
	 */
	if (!(file->f_flags & O_NONBLOCK)) {
		if (down_interruptible(&dev->limit_sem)) {
			retval = -ERESTARTSYS;
			goto exit;
		}
	} else {
		if (down_trylock(&dev->limit_sem)) {
			retval = -EAGAIN;
			goto exit;
		}
	}

	spin_lock_irq(&dev->err_lock);
	retval = dev->errors;
	if (retval < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		retval = (retval == -EPIPE) ? retval : -EIO;
	}
	spin_unlock_irq(&dev->err_lock);
	if (retval < 0)
		goto error;

	/* create a urb, and a buffer for it, and copy the data to the urb */
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		retval = -ENOMEM;
		goto error;
	}

	buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
				 &urb->transfer_dma);
	if (!buf) {
		retval = -ENOMEM;
		goto error;
	}

	if (copy_from_user(buf, user_buffer, writesize)) {
		retval = -EFAULT;
		goto error;
	}

	/* this lock makes sure we don't submit URBs to gone devices */
	mutex_lock(&dev->io_mutex);
	if (!dev->interface) {		/* disconnect() was called */
		mutex_unlock(&dev->io_mutex);
		retval = -ENODEV;
		goto error;
	}

	/* initialize the urb properly */
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
			  buf, writesize, skel_write_bulk_callback, dev);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->submitted);

	/* send the data out the bulk port */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	mutex_unlock(&dev->io_mutex);
	if (retval) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting write urb, error %d\n",
			__func__, retval);
		goto error_unanchor;
	}

	/*
	 * release our reference to this urb, the USB core will eventually free
	 * it entirely
	 */
	usb_free_urb(urb);

	return writesize;

error_unanchor:
	usb_unanchor_urb(urb);
error:
	if (urb) {
		usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma);
		usb_free_urb(urb);
	}
	up(&dev->limit_sem);

exit:
	return retval;
}
static const struct file_operations skel_fops = {
	.owner =	THIS_MODULE,
	.read =		skel_read,
	.write =	skel_write,
	.open =		skel_open,
	.release =	skel_release,
	.flush =	skel_flush,
	.llseek =	noop_llseek,
};
/*
 * usb class driver info in order to get a minor number from the usb core,
 * and to have the device registered with the driver core
 */
static struct usb_class_driver skel_class = {
	.name =		"skel%d",
	.fops =		&skel_fops,
	.minor_base =	USB_SKEL_MINOR_BASE,
};
static int skel_probe(struct usb_interface *interface,
		      const struct usb_device_id *id)
{
	struct usb_skel *dev;
	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
	int retval;

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kref_init(&dev->kref);
	sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
	mutex_init(&dev->io_mutex);
	spin_lock_init(&dev->err_lock);
	init_usb_anchor(&dev->submitted);
	init_waitqueue_head(&dev->bulk_in_wait);

	dev->udev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = interface;

	/* set up the endpoint information */
	/* use only the first bulk-in and bulk-out endpoints */
	retval = usb_find_common_endpoints(interface->cur_altsetting,
			&bulk_in, &bulk_out, NULL, NULL);
	if (retval) {
		dev_err(&interface->dev,
			"Could not find both bulk-in and bulk-out endpoints\n");
		goto error;
	}

	dev->bulk_in_size = usb_endpoint_maxp(bulk_in);
	dev->bulk_in_endpointAddr = bulk_in->bEndpointAddress;
	dev->bulk_in_buffer = kmalloc(dev->bulk_in_size, GFP_KERNEL);
	if (!dev->bulk_in_buffer) {
		retval = -ENOMEM;
		goto error;
	}
	dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->bulk_in_urb) {
		retval = -ENOMEM;
		goto error;
	}

	dev->bulk_out_endpointAddr = bulk_out->bEndpointAddress;

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	/* we can register the device now, as it is ready */
	retval = usb_register_dev(interface, &skel_class);
	if (retval) {
		/* something prevented us from registering this driver */
		dev_err(&interface->dev,
			"Not able to get a minor for this device.\n");
		usb_set_intfdata(interface, NULL);
		goto error;
	}

	/* let the user know what node this device is now attached to */
	dev_info(&interface->dev,
		 "USB Skeleton device now attached to USBSkel-%d",
		 interface->minor);
	return 0;

error:
	/* this frees allocated memory */
	kref_put(&dev->kref, skel_delete);

	return retval;
}
static void skel_disconnect(struct usb_interface *interface)
{
	struct usb_skel *dev;
	int minor = interface->minor;

	dev = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);

	/* give back our minor */
	usb_deregister_dev(interface, &skel_class);

	/* prevent more I/O from starting */
	mutex_lock(&dev->io_mutex);
	dev->interface = NULL;
	mutex_unlock(&dev->io_mutex);

	usb_kill_anchored_urbs(&dev->submitted);

	/* decrement our usage count */
	kref_put(&dev->kref, skel_delete);

	dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
}
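/*
 * Drain outstanding I/O: wait up to a second for anchored write URBs to
 * complete, forcibly kill whatever is still pending, then kill the
 * bulk-in URB as well.  Used by flush, suspend and pre_reset.
 */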
static void skel_draw_down(struct usb_skel *dev)
{
	int time;

	time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
	if (!time)
		usb_kill_anchored_urbs(&dev->submitted);
	usb_kill_urb(dev->bulk_in_urb);
}
static int skel_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	if (!dev)
		return 0;
	skel_draw_down(dev);
	return 0;
}
static int skel_resume(struct usb_interface *intf)
{
	return 0;
}
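/*
 * Note the asymmetric locking: pre_reset() takes io_mutex and drains I/O,
 * and the mutex is only released again in post_reset(), so no new reads
 * or writes can be submitted while the device is being reset.
 */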
static int skel_pre_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	return 0;
}

static int skel_post_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	/* we are sure no URBs are active - no locking needed */
	dev->errors = -EPIPE;
	mutex_unlock(&dev->io_mutex);

	return 0;
}
static struct usb_driver skel_driver = {
	.name =		"skeleton",
	.probe =	skel_probe,
	.disconnect =	skel_disconnect,
	.suspend =	skel_suspend,
	.resume =	skel_resume,
	.pre_reset =	skel_pre_reset,
	.post_reset =	skel_post_reset,
	.id_table =	skel_table,
	.supports_autosuspend = 1,
};

module_usb_driver(skel_driver);

MODULE_LICENSE("GPL");