/*
 * Copyright (c) 2001 Paul Stewart
 * Copyright (c) 2001 Vojtech Pavlik
 *
 *  HID char devices, giving access to raw HID device events.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to Paul Stewart <stewart@wetlogic.net>
 */
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/input.h>
#include <linux/usb.h>
#include <linux/hid.h>
#include <linux/hiddev.h>
#include <linux/compat.h>
#ifdef CONFIG_USB_DYNAMIC_MINORS
#define HIDDEV_MINOR_BASE	0
#define HIDDEV_MINORS		256
#else
#define HIDDEV_MINOR_BASE	96
#define HIDDEV_MINORS		16
#endif

#define HIDDEV_BUFFER_SIZE	2048
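/*
 * Illustrative note (not from the original source): the per-reader event
 * queue below is a ring buffer whose head/tail indices are wrapped with
 * "& (HIDDEV_BUFFER_SIZE - 1)", which only works while HIDDEV_BUFFER_SIZE
 * stays a power of two.  For example, with the current value of 2048:
 *
 *	(2047 + 1) & (2048 - 1) == 0	 - wraps back to slot 0
 *	(100  + 1) & (2048 - 1) == 101	 - a plain increment
 */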
struct hiddev {
	int exist;
	int open;
	struct mutex existancelock;
	wait_queue_head_t wait;
	struct hid_device *hid;
	struct list_head list;
	spinlock_t list_lock;
};

struct hiddev_list {
	struct hiddev_usage_ref buffer[HIDDEV_BUFFER_SIZE];
	int head;
	int tail;
	unsigned flags;
	struct fasync_struct *fasync;
	struct hiddev *hiddev;
	struct list_head node;
	struct mutex thread_lock;
};

static struct hiddev *hiddev_table[HIDDEV_MINORS];
/*
 * Find a report, given the report's type and ID.  The ID can be specified
 * indirectly by REPORT_ID_FIRST (which returns the first report of the given
 * type) or by (REPORT_ID_NEXT | old_id), which returns the next report of the
 * given type which follows old_id.
 */
static struct hid_report *
hiddev_lookup_report(struct hid_device *hid, struct hiddev_report_info *rinfo)
{
	unsigned int flags = rinfo->report_id & ~HID_REPORT_ID_MASK;
	unsigned int rid = rinfo->report_id & HID_REPORT_ID_MASK;
	struct hid_report_enum *report_enum;
	struct hid_report *report;
	struct list_head *list;

	if (rinfo->report_type < HID_REPORT_TYPE_MIN ||
	    rinfo->report_type > HID_REPORT_TYPE_MAX)
		return NULL;

	report_enum = hid->report_enum +
		(rinfo->report_type - HID_REPORT_TYPE_MIN);

	switch (flags) {
	case 0: /* Nothing to do -- report_id is already set correctly */
		break;

	case HID_REPORT_ID_FIRST:
		if (list_empty(&report_enum->report_list))
			return NULL;

		list = report_enum->report_list.next;
		report = list_entry(list, struct hid_report, list);
		rinfo->report_id = report->id;
		break;

	case HID_REPORT_ID_NEXT:
		report = report_enum->report_id_hash[rid];
		if (!report)
			return NULL;

		list = report->list.next;
		if (list == &report_enum->report_list)
			return NULL;

		report = list_entry(list, struct hid_report, list);
		rinfo->report_id = report->id;
		break;

	default:
		return NULL;
	}

	return report_enum->report_id_hash[rinfo->report_id];
}
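/*
 * Illustrative only (not part of the driver): a minimal user-space sketch of
 * how the REPORT_ID_FIRST/REPORT_ID_NEXT convention handled above is used to
 * walk every input report via HIDIOCGREPORTINFO.  The device path and the
 * lack of error handling are assumptions made for brevity.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/hiddev.h>
 *
 *	static void list_input_reports(void)
 *	{
 *		int fd = open("/dev/usb/hiddev0", O_RDONLY);
 *		struct hiddev_report_info rinfo = {
 *			.report_type = HID_REPORT_TYPE_INPUT,
 *			.report_id   = HID_REPORT_ID_FIRST,
 *		};
 *
 *		while (fd >= 0 && ioctl(fd, HIDIOCGREPORTINFO, &rinfo) >= 0) {
 *			printf("report %u: %u fields\n",
 *			       rinfo.report_id, rinfo.num_fields);
 *			rinfo.report_id |= HID_REPORT_ID_NEXT;
 *		}
 *		if (fd >= 0)
 *			close(fd);
 *	}
 */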
/*
 * Perform an exhaustive search of the report table for a usage, given its
 * type and usage id.
 */
static struct hid_field *
hiddev_lookup_usage(struct hid_device *hid, struct hiddev_usage_ref *uref)
{
	int i, j;
	struct hid_report *report;
	struct hid_report_enum *report_enum;
	struct hid_field *field;

	if (uref->report_type < HID_REPORT_TYPE_MIN ||
	    uref->report_type > HID_REPORT_TYPE_MAX)
		return NULL;

	report_enum = hid->report_enum +
		(uref->report_type - HID_REPORT_TYPE_MIN);

	list_for_each_entry(report, &report_enum->report_list, list) {
		for (i = 0; i < report->maxfield; i++) {
			field = report->field[i];
			for (j = 0; j < field->maxusage; j++) {
				if (field->usage[j].hid == uref->usage_code) {
					uref->report_id = report->id;
					uref->field_index = i;
					uref->usage_index = j;
					return field;
				}
			}
		}
	}

	return NULL;
}
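/*
 * Illustrative only (not part of the driver): the exhaustive lookup above is
 * what services a HIDIOCGUSAGE request whose report_id is
 * HID_REPORT_ID_UNKNOWN -- user space supplies only the usage code and lets
 * the driver locate the report and field.  The usage code below (Button 1
 * from the Button usage page) is an arbitrary example.
 *
 *	struct hiddev_usage_ref uref = {
 *		.report_type = HID_REPORT_TYPE_INPUT,
 *		.report_id   = HID_REPORT_ID_UNKNOWN,
 *		.usage_code  = 0x00090001,
 *	};
 *
 *	if (ioctl(fd, HIDIOCGUSAGE, &uref) == 0)
 *		printf("button 1 value: %d\n", uref.value);
 */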
static void hiddev_send_event(struct hid_device *hid,
			      struct hiddev_usage_ref *uref)
{
	struct hiddev *hiddev = hid->hiddev;
	struct hiddev_list *list;
	unsigned long flags;

	spin_lock_irqsave(&hiddev->list_lock, flags);
	list_for_each_entry(list, &hiddev->list, node) {
		if (uref->field_index != HID_FIELD_INDEX_NONE ||
		    (list->flags & HIDDEV_FLAG_REPORT) != 0) {
			list->buffer[list->head] = *uref;
			list->head = (list->head + 1) &
				(HIDDEV_BUFFER_SIZE - 1);
			kill_fasync(&list->fasync, SIGIO, POLL_IN);
		}
	}
	spin_unlock_irqrestore(&hiddev->list_lock, flags);

	wake_up_interruptible(&hiddev->wait);
}
/*
 * This is where hid.c calls into hiddev to pass an event that occurred over
 * the interrupt pipe
 */
void hiddev_hid_event(struct hid_device *hid, struct hid_field *field,
		      struct hid_usage *usage, __s32 value)
{
	unsigned type = field->report_type;
	struct hiddev_usage_ref uref;

	uref.report_type =
	  (type == HID_INPUT_REPORT) ? HID_REPORT_TYPE_INPUT :
	  ((type == HID_OUTPUT_REPORT) ? HID_REPORT_TYPE_OUTPUT :
	   ((type == HID_FEATURE_REPORT) ? HID_REPORT_TYPE_FEATURE : 0));
	uref.report_id = field->report->id;
	uref.field_index = field->index;
	uref.usage_index = (usage - field->usage);
	uref.usage_code = usage->hid;
	uref.value = value;

	hiddev_send_event(hid, &uref);
}
EXPORT_SYMBOL_GPL(hiddev_hid_event);
void hiddev_report_event(struct hid_device *hid, struct hid_report *report)
{
	unsigned type = report->type;
	struct hiddev_usage_ref uref;

	memset(&uref, 0, sizeof(uref));
	uref.report_type =
	  (type == HID_INPUT_REPORT) ? HID_REPORT_TYPE_INPUT :
	  ((type == HID_OUTPUT_REPORT) ? HID_REPORT_TYPE_OUTPUT :
	   ((type == HID_FEATURE_REPORT) ? HID_REPORT_TYPE_FEATURE : 0));
	uref.report_id = report->id;
	uref.field_index = HID_FIELD_INDEX_NONE;

	hiddev_send_event(hid, &uref);
}
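/*
 * Illustrative only (not part of the driver): the HID_FIELD_INDEX_NONE
 * "end of report" markers queued above are only delivered to readers that
 * opted in with HIDDEV_FLAG_REPORT, and readers only receive full
 * hiddev_usage_ref records (rather than hiddev_event records) after setting
 * HIDDEV_FLAG_UREF.  A minimal user-space sketch, error handling omitted:
 *
 *	int flags = HIDDEV_FLAG_UREF | HIDDEV_FLAG_REPORT;
 *	struct hiddev_usage_ref uref;
 *
 *	ioctl(fd, HIDIOCSFLAG, &flags);
 *	while (read(fd, &uref, sizeof(uref)) == sizeof(uref)) {
 *		if (uref.field_index == HID_FIELD_INDEX_NONE)
 *			printf("end of report %u\n", uref.report_id);
 *		else
 *			printf("usage 0x%08x = %d\n",
 *			       uref.usage_code, uref.value);
 *	}
 */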
static int hiddev_fasync(int fd, struct file *file, int on)
{
	struct hiddev_list *list = file->private_data;

	return fasync_helper(fd, file, on, &list->fasync);
}
static int hiddev_release(struct inode * inode, struct file * file)
{
	struct hiddev_list *list = file->private_data;
	unsigned long flags;

	spin_lock_irqsave(&list->hiddev->list_lock, flags);
	list_del(&list->node);
	spin_unlock_irqrestore(&list->hiddev->list_lock, flags);

	if (!--list->hiddev->open) {
		if (list->hiddev->exist) {
			usbhid_close(list->hiddev->hid);
			usbhid_put_power(list->hiddev->hid);
static int hiddev_open(struct inode *inode, struct file *file)
{
	struct hiddev_list *list;

	/* See comment in hiddev_connect() for BKL explanation */

	i = iminor(inode) - HIDDEV_MINOR_BASE;

	if (i >= HIDDEV_MINORS || i < 0 || !hiddev_table[i])

	if (!(list = kzalloc(sizeof(struct hiddev_list), GFP_KERNEL)))

	mutex_init(&list->thread_lock);

	list->hiddev = hiddev_table[i];

	file->private_data = list;

	/*
	 * no need for locking because the USB major number
	 * is shared which usbcore guards against disconnect
	 */
	if (list->hiddev->exist) {
		if (!list->hiddev->open++) {
			res = usbhid_open(hiddev_table[i]->hid);

	spin_lock_irq(&list->hiddev->list_lock);
	list_add_tail(&list->node, &hiddev_table[i]->list);
	spin_unlock_irq(&list->hiddev->list_lock);

	if (!list->hiddev->open++)
		if (list->hiddev->exist) {
			struct hid_device *hid = hiddev_table[i]->hid;
			res = usbhid_get_power(hid);

	file->private_data = NULL;
static ssize_t hiddev_write(struct file * file, const char __user * buffer, size_t count, loff_t *ppos)
{
	return -EINVAL;
}
static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t count, loff_t *ppos)
{
	struct hiddev_list *list = file->private_data;

	event_size = ((list->flags & HIDDEV_FLAG_UREF) != 0) ?
		sizeof(struct hiddev_usage_ref) : sizeof(struct hiddev_event);

	if (count < event_size)

	/* lock against other threads */
	retval = mutex_lock_interruptible(&list->thread_lock);

	while (retval == 0) {
		if (list->head == list->tail) {
			prepare_to_wait(&list->hiddev->wait, &wait, TASK_INTERRUPTIBLE);

			while (list->head == list->tail) {
				if (file->f_flags & O_NONBLOCK) {

				if (signal_pending(current)) {
					retval = -ERESTARTSYS;

				if (!list->hiddev->exist) {

				/* let O_NONBLOCK tasks run */
				mutex_unlock(&list->thread_lock);

				if (mutex_lock_interruptible(&list->thread_lock))

				set_current_state(TASK_INTERRUPTIBLE);

			finish_wait(&list->hiddev->wait, &wait);

		mutex_unlock(&list->thread_lock);

		while (list->head != list->tail &&
		       retval + event_size <= count) {
			if ((list->flags & HIDDEV_FLAG_UREF) == 0) {
				if (list->buffer[list->tail].field_index != HID_FIELD_INDEX_NONE) {
					struct hiddev_event event;

					event.hid = list->buffer[list->tail].usage_code;
					event.value = list->buffer[list->tail].value;
					if (copy_to_user(buffer + retval, &event, sizeof(struct hiddev_event))) {
						mutex_unlock(&list->thread_lock);

					retval += sizeof(struct hiddev_event);

				if (list->buffer[list->tail].field_index != HID_FIELD_INDEX_NONE ||
				    (list->flags & HIDDEV_FLAG_REPORT) != 0) {

					if (copy_to_user(buffer + retval, list->buffer + list->tail, sizeof(struct hiddev_usage_ref))) {
						mutex_unlock(&list->thread_lock);

					retval += sizeof(struct hiddev_usage_ref);

			list->tail = (list->tail + 1) & (HIDDEV_BUFFER_SIZE - 1);

	mutex_unlock(&list->thread_lock);
/*
 * No kernel lock - fine
 */
static unsigned int hiddev_poll(struct file *file, poll_table *wait)
{
	struct hiddev_list *list = file->private_data;

	poll_wait(file, &list->hiddev->wait, wait);
	if (list->head != list->tail)
		return POLLIN | POLLRDNORM;
	if (!list->hiddev->exist)
		return POLLERR | POLLHUP;
	return 0;
}
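/*
 * Illustrative only (not part of the driver): hiddev_poll() above lets user
 * space multiplex on the device with poll()/select() and then read without
 * blocking.  A minimal sketch, assuming fd was opened on a hiddev node and
 * events arrive in the default hiddev_event format:
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct hiddev_event ev;
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN) &&
 *	    read(fd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("usage 0x%08x = %d\n", ev.hid, ev.value);
 */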
static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, void __user *user_arg)
{
	struct hid_device *hid = hiddev->hid;
	struct hiddev_report_info rinfo;
	struct hiddev_usage_ref_multi *uref_multi = NULL;
	struct hiddev_usage_ref *uref;
	struct hid_report *report;
	struct hid_field *field;

	uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL);

	uref = &uref_multi->uref;
	if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
		if (copy_from_user(uref_multi, user_arg,
				   sizeof(*uref_multi)))

		if (copy_from_user(uref, user_arg, sizeof(*uref)))

	rinfo.report_type = uref->report_type;
	rinfo.report_id = uref->report_id;
	if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)

	if (uref->field_index >= report->maxfield)

	field = report->field[uref->field_index];
	if (uref->usage_index >= field->maxusage)

	uref->usage_code = field->usage[uref->usage_index].hid;

	if (copy_to_user(user_arg, uref, sizeof(*uref)))

	if (cmd != HIDIOCGUSAGE &&
	    cmd != HIDIOCGUSAGES &&
	    uref->report_type == HID_REPORT_TYPE_INPUT)

	if (uref->report_id == HID_REPORT_ID_UNKNOWN) {
		field = hiddev_lookup_usage(hid, uref);

		rinfo.report_type = uref->report_type;
		rinfo.report_id = uref->report_id;
		if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)

		if (uref->field_index >= report->maxfield)

		field = report->field[uref->field_index];

	if (cmd == HIDIOCGCOLLECTIONINDEX) {
		if (uref->usage_index >= field->maxusage)

	} else if (uref->usage_index >= field->report_count)

	else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
		 (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
		  uref->usage_index + uref_multi->num_values > field->report_count))

	uref->value = field->value[uref->usage_index];
	if (copy_to_user(user_arg, uref, sizeof(*uref)))

	field->value[uref->usage_index] = uref->value;

	case HIDIOCGCOLLECTIONINDEX:
		i = field->usage[uref->usage_index].collection_index;

		for (i = 0; i < uref_multi->num_values; i++)
			uref_multi->values[i] =
				field->value[uref->usage_index + i];
		if (copy_to_user(user_arg, uref_multi,
				 sizeof(*uref_multi)))

		for (i = 0; i < uref_multi->num_values; i++)
			field->value[uref->usage_index + i] =
				uref_multi->values[i];
static noinline int hiddev_ioctl_string(struct hiddev *hiddev, unsigned int cmd, void __user *user_arg)
{
	struct hid_device *hid = hiddev->hid;
	struct usb_device *dev = hid_to_usb_dev(hid);

	if (get_user(idx, (int __user *)user_arg))

	if ((buf = kmalloc(HID_STRING_SIZE, GFP_KERNEL)) == NULL)

	if ((len = usb_string(dev, idx, buf, HID_STRING_SIZE-1)) < 0) {

	if (copy_to_user(user_arg+sizeof(int), buf, len+1)) {
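/*
 * Illustrative only (not part of the driver): HIDIOCGSTRING, serviced by the
 * helper above, copies a USB string descriptor into the value[] member that
 * follows the index in struct hiddev_string_descriptor.  The index used here
 * is only an example; valid indices are device specific.
 *
 *	struct hiddev_string_descriptor sdesc;
 *
 *	sdesc.index = 2;
 *	if (ioctl(fd, HIDIOCGSTRING, &sdesc) >= 0)
 *		printf("string %d: %s\n", sdesc.index, sdesc.value);
 */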
static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hiddev_list *list = file->private_data;
	struct hiddev *hiddev = list->hiddev;
	struct hid_device *hid = hiddev->hid;
	struct usb_device *dev = hid_to_usb_dev(hid);
	struct hiddev_collection_info cinfo;
	struct hiddev_report_info rinfo;
	struct hiddev_field_info finfo;
	struct hiddev_devinfo dinfo;
	struct hid_report *report;
	struct hid_field *field;
	struct usbhid_device *usbhid = hid->driver_data;
	void __user *user_arg = (void __user *)arg;

	/* Called without BKL by compat methods so no BKL taken */

	/* FIXME: Who or what stop this racing with a disconnect ?? */

	switch (cmd) {

	case HIDIOCGVERSION:
		return put_user(HID_VERSION, (int __user *)arg);

	case HIDIOCAPPLICATION:
		if (arg < 0 || arg >= hid->maxapplication)

		for (i = 0; i < hid->maxcollection; i++)
			if (hid->collection[i].type ==
			    HID_COLLECTION_APPLICATION && arg-- == 0)

		if (i == hid->maxcollection)

		return hid->collection[i].usage;

	case HIDIOCGDEVINFO:
		dinfo.bustype = BUS_USB;
		dinfo.busnum = dev->bus->busnum;
		dinfo.devnum = dev->devnum;
		dinfo.ifnum = usbhid->ifnum;
		dinfo.vendor = le16_to_cpu(dev->descriptor.idVendor);
		dinfo.product = le16_to_cpu(dev->descriptor.idProduct);
		dinfo.version = le16_to_cpu(dev->descriptor.bcdDevice);
		dinfo.num_applications = hid->maxapplication;
		if (copy_to_user(user_arg, &dinfo, sizeof(dinfo)))

	case HIDIOCGFLAG:
		if (put_user(list->flags, (int __user *)arg))

	case HIDIOCSFLAG:
		if (get_user(newflags, (int __user *)arg))

		if ((newflags & ~HIDDEV_FLAGS) != 0 ||
		    ((newflags & HIDDEV_FLAG_REPORT) != 0 &&
		     (newflags & HIDDEV_FLAG_UREF) == 0))

		list->flags = newflags;

	case HIDIOCGSTRING:
		mutex_lock(&hiddev->existancelock);

		r = hiddev_ioctl_string(hiddev, cmd, user_arg);

		mutex_unlock(&hiddev->existancelock);

	case HIDIOCINITREPORT:
		mutex_lock(&hiddev->existancelock);
		if (!hiddev->exist) {
			mutex_unlock(&hiddev->existancelock);

		usbhid_init_reports(hid);
		mutex_unlock(&hiddev->existancelock);

	case HIDIOCGREPORT:
		if (copy_from_user(&rinfo, user_arg, sizeof(rinfo)))

		if (rinfo.report_type == HID_REPORT_TYPE_OUTPUT)

		if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)

		mutex_lock(&hiddev->existancelock);

		usbhid_submit_report(hid, report, USB_DIR_IN);

		mutex_unlock(&hiddev->existancelock);

	case HIDIOCSREPORT:
		if (copy_from_user(&rinfo, user_arg, sizeof(rinfo)))

		if (rinfo.report_type == HID_REPORT_TYPE_INPUT)

		if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)

		mutex_lock(&hiddev->existancelock);

		usbhid_submit_report(hid, report, USB_DIR_OUT);

		mutex_unlock(&hiddev->existancelock);

	case HIDIOCGREPORTINFO:
		if (copy_from_user(&rinfo, user_arg, sizeof(rinfo)))

		if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)

		rinfo.num_fields = report->maxfield;

		if (copy_to_user(user_arg, &rinfo, sizeof(rinfo)))

	case HIDIOCGFIELDINFO:
		if (copy_from_user(&finfo, user_arg, sizeof(finfo)))

		rinfo.report_type = finfo.report_type;
		rinfo.report_id = finfo.report_id;
		if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)

		if (finfo.field_index >= report->maxfield)

		field = report->field[finfo.field_index];
		memset(&finfo, 0, sizeof(finfo));
		finfo.report_type = rinfo.report_type;
		finfo.report_id = rinfo.report_id;
		finfo.field_index = field->report_count - 1;
		finfo.maxusage = field->maxusage;
		finfo.flags = field->flags;
		finfo.physical = field->physical;
		finfo.logical = field->logical;
		finfo.application = field->application;
		finfo.logical_minimum = field->logical_minimum;
		finfo.logical_maximum = field->logical_maximum;
		finfo.physical_minimum = field->physical_minimum;
		finfo.physical_maximum = field->physical_maximum;
		finfo.unit_exponent = field->unit_exponent;
		finfo.unit = field->unit;

		if (copy_to_user(user_arg, &finfo, sizeof(finfo)))

	case HIDIOCGCOLLECTIONINDEX:
		mutex_lock(&hiddev->existancelock);

		r = hiddev_ioctl_usage(hiddev, cmd, user_arg);

		mutex_unlock(&hiddev->existancelock);

	case HIDIOCGCOLLECTIONINFO:
		if (copy_from_user(&cinfo, user_arg, sizeof(cinfo)))

		if (cinfo.index >= hid->maxcollection)

		cinfo.type = hid->collection[cinfo.index].type;
		cinfo.usage = hid->collection[cinfo.index].usage;
		cinfo.level = hid->collection[cinfo.index].level;

		if (copy_to_user(user_arg, &cinfo, sizeof(cinfo)))

	default:

		if (_IOC_TYPE(cmd) != 'H' || _IOC_DIR(cmd) != _IOC_READ)

		if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGNAME(0))) {

			len = strlen(hid->name) + 1;
			if (len > _IOC_SIZE(cmd))
				len = _IOC_SIZE(cmd);
			return copy_to_user(user_arg, hid->name, len) ?
				-EFAULT : len;

		if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGPHYS(0))) {

			len = strlen(hid->phys) + 1;
			if (len > _IOC_SIZE(cmd))
				len = _IOC_SIZE(cmd);
			return copy_to_user(user_arg, hid->phys, len) ?
				-EFAULT : len;
#ifdef CONFIG_COMPAT
static long hiddev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return hiddev_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct file_operations hiddev_fops = {
	.owner =	THIS_MODULE,
	.read =		hiddev_read,
	.write =	hiddev_write,
	.poll =		hiddev_poll,
	.open =		hiddev_open,
	.release =	hiddev_release,
	.unlocked_ioctl =	hiddev_ioctl,
	.fasync =	hiddev_fasync,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	hiddev_compat_ioctl,
#endif
};
static char *hiddev_devnode(struct device *dev, mode_t *mode)
{
	return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
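/*
 * Illustrative only (not part of the driver): with the "usb/%s" devnode
 * callback above, udev creates the character device under /dev/usb/, so an
 * application typically opens the node like this (the unit number 0 is an
 * assumption; it depends on which minor the interface was assigned):
 *
 *	int fd = open("/dev/usb/hiddev0", O_RDWR);
 */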
static struct usb_class_driver hiddev_class = {
	.name =		"hiddev%d",
	.devnode =	hiddev_devnode,
	.fops =		&hiddev_fops,
	.minor_base =	HIDDEV_MINOR_BASE,
};
/*
 * This is where hid.c calls us to connect a hid device to the hiddev driver
 */
int hiddev_connect(struct hid_device *hid, unsigned int force)
{
	struct hiddev *hiddev;
	struct usbhid_device *usbhid = hid->driver_data;

	for (i = 0; i < hid->maxcollection; i++)
		if (hid->collection[i].type ==
		    HID_COLLECTION_APPLICATION &&
		    !IS_INPUT_APPLICATION(hid->collection[i].usage))

	if (i == hid->maxcollection)

	if (!(hiddev = kzalloc(sizeof(struct hiddev), GFP_KERNEL)))

	init_waitqueue_head(&hiddev->wait);
	INIT_LIST_HEAD(&hiddev->list);
	spin_lock_init(&hiddev->list_lock);
	mutex_init(&hiddev->existancelock);
	hid->hiddev = hiddev;

	/*
	 * BKL here is used to avoid race after usb_register_dev().
	 * Once the device node has been created, open() could happen on it.
	 * The code below will then fail, as hiddev_table hasn't been
	 * updated.
	 *
	 * The obvious fix -- introducing mutex to guard hiddev_table[]
	 * doesn't work, as usb_open() and usb_register_dev() both take
	 * minor_rwsem, thus we'll have ABBA deadlock.
	 *
	 * Before BKL pushdown, usb_open() had been acquiring it in right
	 * order, so _open() was safe to use it to protect from this race.
	 * Now the order is different, but AB-BA deadlock still doesn't occur
	 * as BKL is dropped on schedule() (i.e. while sleeping on
	 * minor_rwsem). Fugly.
	 */
	retval = usb_register_dev(usbhid->intf, &hiddev_class);
	if (retval) {
		err_hid("Not able to get a minor for this device.");

	hid->minor = usbhid->intf->minor;
	hiddev_table[usbhid->intf->minor - HIDDEV_MINOR_BASE] = hiddev;
/*
 * This is where hid.c calls us to disconnect a hiddev device from the
 * corresponding hid device (usually because the usb device has disconnected)
 */
static struct usb_class_driver hiddev_class;
void hiddev_disconnect(struct hid_device *hid)
{
	struct hiddev *hiddev = hid->hiddev;
	struct usbhid_device *usbhid = hid->driver_data;

	mutex_lock(&hiddev->existancelock);

	mutex_unlock(&hiddev->existancelock);

	hiddev_table[hiddev->hid->minor - HIDDEV_MINOR_BASE] = NULL;
	usb_deregister_dev(usbhid->intf, &hiddev_class);

	usbhid_close(hiddev->hid);
	wake_up_interruptible(&hiddev->wait);
/* Currently this driver is a USB driver.  It's not a conventional one in
 * the sense that it doesn't probe at the USB level.  Instead it waits to
 * be connected by HID through the hiddev_connect / hiddev_disconnect
 * routines.  The reason to register as a USB device is to gain part of the
 * minor number space from the USB major.
 *
 * In theory, should the HID code be generalized to more than one physical
 * medium (say, IEEE 1394), this driver will probably need to register its
 * own major number, and in doing so, no longer need to register with USB.
 * At that point the probe routine and hiddev_driver struct below will no
 * longer be useful.
 */
/* We never attach in this manner, and rely on HID to connect us.  This
 * is why there is no disconnect routine defined in the usb_driver either.
 */
static int hiddev_usbd_probe(struct usb_interface *intf,
			     const struct usb_device_id *hiddev_info)
{
	return -ENODEV;
}

static /* const */ struct usb_driver hiddev_driver = {
	.name =		"hiddev",
	.probe =	hiddev_usbd_probe,
};
int __init hiddev_init(void)
{
	return usb_register(&hiddev_driver);
}

void hiddev_exit(void)
{
	usb_deregister(&hiddev_driver);
}