drivers/input/serio/serio.c
/*
 *  The Serio abstraction module
 *
 *  Copyright (c) 1999-2004 Vojtech Pavlik
 *  Copyright (c) 2004 Dmitry Torokhov
 *  Copyright (c) 2003 Daniele Bellucci
 */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Serio abstraction core");
MODULE_LICENSE("GPL");

/*
 * serio_mutex protects the entire serio subsystem and is taken every time
 * a serio port or driver is registered or unregistered.
 */
static DEFINE_MUTEX(serio_mutex);

static LIST_HEAD(serio_list);

static struct bus_type serio_bus;

static void serio_add_port(struct serio *serio);
static int serio_reconnect_port(struct serio *serio);
static void serio_disconnect_port(struct serio *serio);
static void serio_reconnect_subtree(struct serio *serio);
static void serio_attach_driver(struct serio_driver *drv);
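
/*
 * The helpers below invoke the serio driver's callbacks with
 * serio->drv_mutex held.
 */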
static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
{
	int retval;

	mutex_lock(&serio->drv_mutex);
	retval = drv->connect(serio, drv);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static int serio_reconnect_driver(struct serio *serio)
{
	int retval = -1;

	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->reconnect)
		retval = serio->drv->reconnect(serio);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static void serio_disconnect_driver(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv)
		serio->drv->disconnect(serio);
	mutex_unlock(&serio->drv_mutex);
}
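
/*
 * serio_match_port() walks the driver's id table (terminated by an entry
 * with both type and proto set to 0); SERIO_ANY in a field acts as a
 * wildcard.
 */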
static int serio_match_port(const struct serio_device_id *ids, struct serio *serio)
{
	while (ids->type || ids->proto) {
		if ((ids->type == SERIO_ANY || ids->type == serio->id.type) &&
		    (ids->proto == SERIO_ANY || ids->proto == serio->id.proto) &&
		    (ids->extra == SERIO_ANY || ids->extra == serio->id.extra) &&
		    (ids->id == SERIO_ANY || ids->id == serio->id.id))
			return 1;

		ids++;
	}

	return 0;
}

/*
 * Basic serio -> driver core mappings
 */

static int serio_bind_driver(struct serio *serio, struct serio_driver *drv)
{
	int error;

	if (serio_match_port(drv->id_table, serio)) {

		serio->dev.driver = &drv->driver;

		if (serio_connect_driver(serio, drv)) {
			serio->dev.driver = NULL;
			return -ENODEV;
		}

		error = device_bind_driver(&serio->dev);
		if (error) {
			dev_warn(&serio->dev,
				 "device_bind_driver() failed for %s (%s) and %s, error: %d\n",
				 serio->phys, serio->name,
				 drv->description, error);
			serio_disconnect_driver(serio);
			serio->dev.driver = NULL;
			return error;
		}
	}
	return 0;
}

static void serio_find_driver(struct serio *serio)
{
	int error;

	error = device_attach(&serio->dev);
	if (error < 0)
		dev_warn(&serio->dev,
			 "device_attach() failed for %s (%s), error: %d\n",
			 serio->phys, serio->name, error);
}

/*
 * Serio event processing.
 */

enum serio_event_type {
	SERIO_RESCAN_PORT,
	SERIO_RECONNECT_PORT,
	SERIO_RECONNECT_SUBTREE,
	SERIO_REGISTER_PORT,
	SERIO_ATTACH_DRIVER,
};

struct serio_event {
	enum serio_event_type type;
	void *object;
	struct module *owner;
	struct list_head node;
};

static DEFINE_SPINLOCK(serio_event_lock);	/* protects serio_event_list */
static LIST_HEAD(serio_event_list);
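
/*
 * serio_get_event() pops the oldest pending event off serio_event_list,
 * or returns NULL if the list is empty.
 */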
static struct serio_event *serio_get_event(void)
{
	struct serio_event *event = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	if (!list_empty(&serio_event_list)) {
		event = list_first_entry(&serio_event_list,
					 struct serio_event, node);
		list_del_init(&event->node);
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return event;
}

static void serio_free_event(struct serio_event *event)
{
	module_put(event->owner);
	kfree(event);
}

static void serio_remove_duplicate_events(void *object,
					  enum serio_event_type type)
{
	struct serio_event *e, *next;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry_safe(e, next, &serio_event_list, node) {
		if (object == e->object) {
			/*
			 * If this event is of a different type we should not
			 * look further - we only suppress duplicate events
			 * that were sent back-to-back.
			 */
			if (type != e->type)
				break;

			list_del_init(&e->node);
			serio_free_event(e);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}
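
/*
 * serio_handle_event() runs off system_long_wq and drains the event list
 * under serio_mutex, serializing against port and driver (un)registration.
 */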
static void serio_handle_event(struct work_struct *work)
{
	struct serio_event *event;

	mutex_lock(&serio_mutex);

	while ((event = serio_get_event())) {

		switch (event->type) {

		case SERIO_REGISTER_PORT:
			serio_add_port(event->object);
			break;

		case SERIO_RECONNECT_PORT:
			serio_reconnect_port(event->object);
			break;

		case SERIO_RESCAN_PORT:
			serio_disconnect_port(event->object);
			serio_find_driver(event->object);
			break;

		case SERIO_RECONNECT_SUBTREE:
			serio_reconnect_subtree(event->object);
			break;

		case SERIO_ATTACH_DRIVER:
			serio_attach_driver(event->object);
			break;
		}

		serio_remove_duplicate_events(event->object, event->type);
		serio_free_event(event);
	}

	mutex_unlock(&serio_mutex);
}

static DECLARE_WORK(serio_event_work, serio_handle_event);

static int serio_queue_event(void *object, struct module *owner,
			     enum serio_event_type event_type)
{
	unsigned long flags;
	struct serio_event *event;
	int retval = 0;

	spin_lock_irqsave(&serio_event_lock, flags);

	/*
	 * Scan the event list for other events for the same serio port,
	 * starting with the most recent one. If the event is the same we
	 * do not need to add a new one. If the event is of a different
	 * type we need to add this event and should not look further
	 * because we need to preserve the sequence of distinct events.
	 */
	list_for_each_entry_reverse(event, &serio_event_list, node) {
		if (event->object == object) {
			if (event->type == event_type)
				goto out;
			break;
		}
	}

	event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
	if (!event) {
		pr_err("Not enough memory to queue event %d\n", event_type);
		retval = -ENOMEM;
		goto out;
	}

	if (!try_module_get(owner)) {
		pr_warning("Can't get module reference, dropping event %d\n",
			   event_type);
		kfree(event);
		retval = -EINVAL;
		goto out;
	}

	event->type = event_type;
	event->object = object;
	event->owner = owner;

	list_add_tail(&event->node, &serio_event_list);
	queue_work(system_long_wq, &serio_event_work);

out:
	spin_unlock_irqrestore(&serio_event_lock, flags);
	return retval;
}

/*
 * Remove all events that have been submitted for a given
 * object, be it a serio port or a driver.
 */
static void serio_remove_pending_events(void *object)
{
	struct serio_event *event, *next;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry_safe(event, next, &serio_event_list, node) {
		if (event->object == object) {
			list_del_init(&event->node);
			serio_free_event(event);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}

/*
 * Locate a child serio port (if any) that has not been fully registered yet.
 *
 * Children are registered by the driver's connect() handler, so there can't
 * be a grandchild pending registration together with a child.
 */
static struct serio *serio_get_pending_child(struct serio *parent)
{
	struct serio_event *event;
	struct serio *serio, *child = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry(event, &serio_event_list, node) {
		if (event->type == SERIO_REGISTER_PORT) {
			serio = event->object;
			if (serio->parent == parent) {
				child = serio;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return child;
}

/*
 * Serio port operations
 */

static ssize_t serio_show_description(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->name);
}

static ssize_t serio_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "serio:ty%02Xpr%02Xid%02Xex%02X\n",
		       serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
}

static ssize_t serio_show_id_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.type);
}

static ssize_t serio_show_id_proto(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.proto);
}

static ssize_t serio_show_id_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.id);
}

static ssize_t serio_show_id_extra(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.extra);
}

static DEVICE_ATTR(type, S_IRUGO, serio_show_id_type, NULL);
static DEVICE_ATTR(proto, S_IRUGO, serio_show_id_proto, NULL);
static DEVICE_ATTR(id, S_IRUGO, serio_show_id_id, NULL);
static DEVICE_ATTR(extra, S_IRUGO, serio_show_id_extra, NULL);

static struct attribute *serio_device_id_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_proto.attr,
	&dev_attr_id.attr,
	&dev_attr_extra.attr,
	NULL
};

static struct attribute_group serio_id_attr_group = {
	.name	= "id",
	.attrs	= serio_device_id_attrs,
};

static const struct attribute_group *serio_device_attr_groups[] = {
	&serio_id_attr_group,
	NULL
};

static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	struct device_driver *drv;
	int error;

	error = mutex_lock_interruptible(&serio_mutex);
	if (error)
		return error;

	if (!strncmp(buf, "none", count)) {
		serio_disconnect_port(serio);
	} else if (!strncmp(buf, "reconnect", count)) {
		serio_reconnect_subtree(serio);
	} else if (!strncmp(buf, "rescan", count)) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
	} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
		serio_disconnect_port(serio);
		error = serio_bind_driver(serio, to_serio_driver(drv));
		put_driver(drv);
		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
	} else {
		error = -EINVAL;
	}

	mutex_unlock(&serio_mutex);

	return error ? error : count;
}

static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->manual_bind ? "manual" : "auto");
}

static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}

static struct device_attribute serio_device_attrs[] = {
	__ATTR(description, S_IRUGO, serio_show_description, NULL),
	__ATTR(modalias, S_IRUGO, serio_show_modalias, NULL),
	__ATTR(drvctl, S_IWUSR, NULL, serio_rebind_driver),
	__ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode),
	__ATTR_NULL
};
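
/*
 * serio_release_port() is the struct device release callback; it frees
 * the port once the last reference to it is dropped.
 */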
static void serio_release_port(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	kfree(serio);
	module_put(THIS_MODULE);
}

/*
 * Prepare serio port for registration.
 */
static void serio_init_port(struct serio *serio)
{
	static atomic_t serio_no = ATOMIC_INIT(0);

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&serio->node);
	INIT_LIST_HEAD(&serio->child_node);
	INIT_LIST_HEAD(&serio->children);
	spin_lock_init(&serio->lock);
	mutex_init(&serio->drv_mutex);
	device_initialize(&serio->dev);
	dev_set_name(&serio->dev, "serio%ld",
		     (long)atomic_inc_return(&serio_no) - 1);
	serio->dev.bus = &serio_bus;
	serio->dev.release = serio_release_port;
	serio->dev.groups = serio_device_attr_groups;
	if (serio->parent) {
		serio->dev.parent = &serio->parent->dev;
		serio->depth = serio->parent->depth + 1;
	} else
		serio->depth = 0;
	lockdep_set_subclass(&serio->lock, serio->depth);
}

/*
 * Complete serio port registration.
 * Driver core will attempt to find an appropriate driver for the port.
 */
static void serio_add_port(struct serio *serio)
{
	struct serio *parent = serio->parent;
	int error;

	if (parent) {
		serio_pause_rx(parent);
		list_add_tail(&serio->child_node, &parent->children);
		serio_continue_rx(parent);
	}

	list_add_tail(&serio->node, &serio_list);

	if (serio->start)
		serio->start(serio);

	error = device_add(&serio->dev);
	if (error)
		dev_err(&serio->dev,
			"device_add() failed for %s (%s), error: %d\n",
			serio->phys, serio->name, error);
}

/*
 * serio_destroy_port() completes the unregistration process and removes
 * the port from the system.
 */
static void serio_destroy_port(struct serio *serio)
{
	struct serio *child;

	while ((child = serio_get_pending_child(serio)) != NULL) {
		serio_remove_pending_events(child);
		put_device(&child->dev);
	}

	if (serio->stop)
		serio->stop(serio);

	if (serio->parent) {
		serio_pause_rx(serio->parent);
		list_del_init(&serio->child_node);
		serio_continue_rx(serio->parent);
		serio->parent = NULL;
	}

	if (device_is_registered(&serio->dev))
		device_del(&serio->dev);

	list_del_init(&serio->node);
	serio_remove_pending_events(serio);
	put_device(&serio->dev);
}

/*
 * Reconnect serio port (re-initialize attached device).
 * If reconnect fails (old device is no longer attached or
 * there was no device to begin with) we do a full rescan in
 * the hope of finding a driver for the port.
 */
static int serio_reconnect_port(struct serio *serio)
{
	int error = serio_reconnect_driver(serio);

	if (error) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
	}

	return error;
}

/*
 * Reconnect serio port and all its children (re-initialize attached
 * devices).
 */
static void serio_reconnect_subtree(struct serio *root)
{
	struct serio *s = root;
	int error;

	do {
		error = serio_reconnect_port(s);
		if (!error) {
			/*
			 * Reconnect was successful, move on to do the
			 * first child.
			 */
			if (!list_empty(&s->children)) {
				s = list_first_entry(&s->children,
						     struct serio, child_node);
				continue;
			}
		}

		/*
		 * Either it was a leaf node or reconnect failed and it
		 * became a leaf node. Continue reconnecting starting with
		 * the next sibling of the parent node.
		 */
		while (s != root) {
			struct serio *parent = s->parent;

			if (!list_is_last(&s->child_node, &parent->children)) {
				s = list_entry(s->child_node.next,
					       struct serio, child_node);
				break;
			}

			s = parent;
		}
	} while (s != root);
}

/*
 * serio_disconnect_port() unbinds a port from its driver. As a side effect
 * all child ports are unbound and destroyed.
 */
static void serio_disconnect_port(struct serio *serio)
{
	struct serio *s = serio;

	/*
	 * Child ports should be disconnected and destroyed
	 * first; we traverse the tree in depth-first order.
	 */
	while (!list_empty(&serio->children)) {

		/* Locate a leaf */
		while (!list_empty(&s->children))
			s = list_first_entry(&s->children,
					     struct serio, child_node);

		/*
		 * Prune this leaf node unless it is the one we
		 * started with.
		 */
		if (s != serio) {
			struct serio *parent = s->parent;

			device_release_driver(&s->dev);
			serio_destroy_port(s);

			s = parent;
		}
	}

	/*
	 * OK, no children left, now disconnect this port.
	 */
	device_release_driver(&serio->dev);
}

void serio_rescan(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RESCAN_PORT);
}
EXPORT_SYMBOL(serio_rescan);

void serio_reconnect(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RECONNECT_SUBTREE);
}
EXPORT_SYMBOL(serio_reconnect);

/*
 * Submits a register request to kseriod for subsequent execution.
 * Note that port registration is always asynchronous.
 */
void __serio_register_port(struct serio *serio, struct module *owner)
{
	serio_init_port(serio);
	serio_queue_event(serio, owner, SERIO_REGISTER_PORT);
}
EXPORT_SYMBOL(__serio_register_port);

/*
 * Synchronously unregisters serio port.
 */
void serio_unregister_port(struct serio *serio)
{
	mutex_lock(&serio_mutex);
	serio_disconnect_port(serio);
	serio_destroy_port(serio);
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_port);

/*
 * Safely unregisters child ports if they are present.
 */
void serio_unregister_child_port(struct serio *serio)
{
	struct serio *s, *next;

	mutex_lock(&serio_mutex);
	list_for_each_entry_safe(s, next, &serio->children, child_node) {
		serio_disconnect_port(s);
		serio_destroy_port(s);
	}
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_child_port);

/*
 * Serio driver operations
 */

static ssize_t serio_driver_show_description(struct device_driver *drv, char *buf)
{
	struct serio_driver *driver = to_serio_driver(drv);
	return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)");
}

static ssize_t serio_driver_show_bind_mode(struct device_driver *drv, char *buf)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	return sprintf(buf, "%s\n", serio_drv->manual_bind ? "manual" : "auto");
}

static ssize_t serio_driver_set_bind_mode(struct device_driver *drv, const char *buf, size_t count)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio_drv->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio_drv->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}

static struct driver_attribute serio_driver_attrs[] = {
	__ATTR(description, S_IRUGO, serio_driver_show_description, NULL),
	__ATTR(bind_mode, S_IWUSR | S_IRUGO,
	       serio_driver_show_bind_mode, serio_driver_set_bind_mode),
	__ATTR_NULL
};

static int serio_driver_probe(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *drv = to_serio_driver(dev->driver);

	return serio_connect_driver(serio, drv);
}

static int serio_driver_remove(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_disconnect_driver(serio);
	return 0;
}
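
/*
 * Give the bound driver a chance to quiesce the port; used on device
 * shutdown and suspend.
 */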
static void serio_cleanup(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->cleanup)
		serio->drv->cleanup(serio);
	mutex_unlock(&serio->drv_mutex);
}

static void serio_shutdown(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);
}

static void serio_attach_driver(struct serio_driver *drv)
{
	int error;

	error = driver_attach(&drv->driver);
	if (error)
		pr_warning("driver_attach() failed for %s with error %d\n",
			   drv->driver.name, error);
}

int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name)
{
	bool manual_bind = drv->manual_bind;
	int error;

	drv->driver.bus = &serio_bus;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	/*
	 * Temporarily disable automatic binding because probing
	 * takes a long time and we are better off doing it in kseriod.
	 */
	drv->manual_bind = true;

	error = driver_register(&drv->driver);
	if (error) {
		pr_err("driver_register() failed for %s, error: %d\n",
		       drv->driver.name, error);
		return error;
	}

	/*
	 * Restore original bind mode and let kseriod bind the
	 * driver to free ports.
	 */
	if (!manual_bind) {
		drv->manual_bind = false;
		error = serio_queue_event(drv, NULL, SERIO_ATTACH_DRIVER);
		if (error) {
			driver_unregister(&drv->driver);
			return error;
		}
	}

	return 0;
}
EXPORT_SYMBOL(__serio_register_driver);

void serio_unregister_driver(struct serio_driver *drv)
{
	struct serio *serio;

	mutex_lock(&serio_mutex);

	drv->manual_bind = true;	/* so serio_find_driver ignores it */
	serio_remove_pending_events(drv);

start_over:
	list_for_each_entry(serio, &serio_list, node) {
		if (serio->drv == drv) {
			serio_disconnect_port(serio);
			serio_find_driver(serio);
			/* we could've deleted some ports, restart */
			goto start_over;
		}
	}

	driver_unregister(&drv->driver);
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_driver);
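
/*
 * Switch the port's driver pointer with rx traffic paused (serio->lock
 * held) so that serio_interrupt() does not race with the driver change.
 */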
static void serio_set_drv(struct serio *serio, struct serio_driver *drv)
{
	serio_pause_rx(serio);
	serio->drv = drv;
	serio_continue_rx(serio);
}

static int serio_bus_match(struct device *dev, struct device_driver *drv)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *serio_drv = to_serio_driver(drv);

	if (serio->manual_bind || serio_drv->manual_bind)
		return 0;

	return serio_match_port(serio_drv->id_table, serio);
}

#ifdef CONFIG_HOTPLUG

#define SERIO_ADD_UEVENT_VAR(fmt, val...)				\
	do {								\
		int err = add_uevent_var(env, fmt, val);		\
		if (err)						\
			return err;					\
	} while (0)

static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct serio *serio;

	if (!dev)
		return -ENODEV;

	serio = to_serio_port(dev);

	SERIO_ADD_UEVENT_VAR("SERIO_TYPE=%02x", serio->id.type);
	SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
	SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
	SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);
	SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
			     serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);

	return 0;
}
#undef SERIO_ADD_UEVENT_VAR

#else

static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return -ENODEV;
}

#endif /* CONFIG_HOTPLUG */

#ifdef CONFIG_PM
static int serio_suspend(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);

	return 0;
}

static int serio_resume(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	/*
	 * Driver reconnect can take a while, so better let kseriod
	 * deal with it.
	 */
	serio_queue_event(serio, NULL, SERIO_RECONNECT_PORT);

	return 0;
}

static const struct dev_pm_ops serio_pm_ops = {
	.suspend	= serio_suspend,
	.resume		= serio_resume,
	.poweroff	= serio_suspend,
	.restore	= serio_resume,
};
#endif /* CONFIG_PM */

/* called from serio_driver->connect/disconnect methods under serio_mutex */
int serio_open(struct serio *serio, struct serio_driver *drv)
{
	serio_set_drv(serio, drv);

	if (serio->open && serio->open(serio)) {
		serio_set_drv(serio, NULL);
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(serio_open);

/* called from serio_driver->connect/disconnect methods under serio_mutex */
void serio_close(struct serio *serio)
{
	if (serio->close)
		serio->close(serio);

	serio_set_drv(serio, NULL);
}
EXPORT_SYMBOL(serio_close);
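
/*
 * serio_interrupt() is called by port drivers to pass received bytes to
 * the bound serio driver. If no driver is attached and no error flags
 * are set, a rescan of the registered port is scheduled instead.
 */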
irqreturn_t serio_interrupt(struct serio *serio,
			    unsigned char data, unsigned int dfl)
{
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&serio->lock, flags);

	if (likely(serio->drv)) {
		ret = serio->drv->interrupt(serio, data, dfl);
	} else if (!dfl && device_is_registered(&serio->dev)) {
		serio_rescan(serio);
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&serio->lock, flags);

	return ret;
}
EXPORT_SYMBOL(serio_interrupt);

static struct bus_type serio_bus = {
	.name		= "serio",
	.dev_attrs	= serio_device_attrs,
	.drv_attrs	= serio_driver_attrs,
	.match		= serio_bus_match,
	.uevent		= serio_uevent,
	.probe		= serio_driver_probe,
	.remove		= serio_driver_remove,
	.shutdown	= serio_shutdown,
#ifdef CONFIG_PM
	.pm		= &serio_pm_ops,
#endif
};

static int __init serio_init(void)
{
	int error;

	error = bus_register(&serio_bus);
	if (error) {
		pr_err("Failed to register serio bus, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit serio_exit(void)
{
	bus_unregister(&serio_bus);

	/*
	 * There should not be any outstanding events but work may
	 * still be scheduled so simply cancel it.
	 */
	cancel_work_sync(&serio_event_work);
}

subsys_initcall(serio_init);
module_exit(serio_exit);