/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"
#define POLL_INTERVAL		(5 * HZ)

#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)
struct rfkill {
	spinlock_t		lock;

	const char		*name;
	enum rfkill_type	type;

	unsigned long		state;

	u32			idx;

	bool			registered;
	bool			persistent;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)
struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event	ev;
};

struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
};
MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");
/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);	/* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");
static struct {
	bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;
#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);
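/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * a driver that wants a stable LED trigger name instead of the
 * default dev_name() sets it before registration:
 *
 *	rfkill_set_led_trigger_name(rfkill, "mydev-wlan-led");
 *	err = rfkill_register(rfkill);
 *
 * Only the pointer is stored, so the string must stay valid for the
 * lifetime of the rfkill struct.
 */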
static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */
static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}
static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}
static bool __rfkill_set_hw_state(struct rfkill *rfkill,
				  bool blocked, bool *change)
{
	unsigned long flags;
	bool prev, any;

	BUG_ON(!change);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	*change = prev != blocked;
	any = !!(rfkill->state & RFKILL_BLOCK_ANY);
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);

	return any;
}
/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, curr;
	int err;

	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	/*
	 * Some platforms (...!) generate input events which affect the
	 * _hard_ kill state -- whenever something tries to change the
	 * current software state query the hardware state too.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = rfkill->state & RFKILL_BLOCK_SW;

	if (rfkill->state & RFKILL_BLOCK_SW)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * Failed -- reset status to _prev, this may be different
		 * from what we set _PREV to earlier in this function
		 * if rfkill_set_sw_state was invoked.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	curr = rfkill->state & RFKILL_BLOCK_SW;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);

	if (prev != curr)
		rfkill_event(rfkill);
}
#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is claimed by userspace (in which case,
 * that switch is left alone) or suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_global_states[type].cur = blocked;
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type && type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}
/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}
/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}
/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global state from the
 * states saved in rfkill_global_states. This can undo the effects of
 * a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}
/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes, when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}
/**
 * rfkill_is_epo_lock_active - returns true when EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}
/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * switch type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif
bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
	bool ret, change;

	ret = __rfkill_set_hw_state(rfkill, blocked, &change);

	if (!rfkill->registered)
		return ret;

	if (change)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);
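/*
 * Usage sketch (hypothetical driver code): a hardware kill switch
 * change is typically reported from an interrupt handler or firmware
 * event path; mydev and mydev_hw_blocked() are illustrative only:
 *
 *	static irqreturn_t mydev_killswitch_irq(int irq, void *arg)
 *	{
 *		struct mydev *dev = arg;
 *
 *		rfkill_set_hw_state(dev->rfkill, mydev_hw_blocked(dev));
 *		return IRQ_HANDLED;
 *	}
 *
 * The return value is the combined block state, which the driver can
 * use to decide whether the transmitter may be powered up.
 */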
static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

	/* if in a ops->set_block right now, use other bit */
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}
bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
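/*
 * Usage sketch (hypothetical driver code): a driver that learns the
 * actual soft-block state from its firmware reports it back so the
 * core and userspace stay in sync:
 *
 *	rfkill_set_sw_state(dev->rfkill, mydev_fw_radio_disabled(dev));
 *
 * Unlike rfkill_set_block(), this only updates bookkeeping and never
 * calls back into the driver's set_block() method.
 */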
void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;

	BUG_ON(!rfkill);
	BUG_ON(rfkill->registered);

	spin_lock_irqsave(&rfkill->lock, flags);
	__rfkill_set_sw_state(rfkill, blocked);
	rfkill->persistent = true;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);
void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);
	if (hw)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (swprev != sw || hwprev != hw)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
	}
}
EXPORT_SYMBOL(rfkill_set_states);
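/*
 * Usage sketch (hypothetical driver code): for drivers that learn
 * both states at once, e.g. after a firmware reload:
 *
 *	mydev_fw_query_rfkill(dev, &sw, &hw);
 *	rfkill_set_states(dev->rfkill, sw, hw);
 */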
static ssize_t rfkill_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill->name);
}
static const char *rfkill_get_type_str(enum rfkill_type type)
{
	BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);

	switch (type) {
	case RFKILL_TYPE_WLAN:
		return "wlan";
	case RFKILL_TYPE_BLUETOOTH:
		return "bluetooth";
	case RFKILL_TYPE_UWB:
		return "ultrawideband";
	case RFKILL_TYPE_WIMAX:
		return "wimax";
	case RFKILL_TYPE_WWAN:
		return "wwan";
	case RFKILL_TYPE_GPS:
		return "gps";
	case RFKILL_TYPE_FM:
		return "fm";
	default:
		BUG();
	}
}
static ssize_t rfkill_type_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}
static ssize_t rfkill_idx_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->idx);
}
static ssize_t rfkill_persistent_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->persistent);
}
static ssize_t rfkill_hard_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}
static ssize_t rfkill_soft_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}
static ssize_t rfkill_soft_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state > 1)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state);
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
static u8 user_state_from_blocked(unsigned long state)
{
	if (state & RFKILL_BLOCK_HW)
		return RFKILL_USER_STATE_HARD_BLOCKED;
	if (state & RFKILL_BLOCK_SW)
		return RFKILL_USER_STATE_SOFT_BLOCKED;

	return RFKILL_USER_STATE_UNBLOCKED;
}
static ssize_t rfkill_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}
static ssize_t rfkill_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
	    state != RFKILL_USER_STATE_UNBLOCKED)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
static ssize_t rfkill_claim_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sprintf(buf, "%d\n", 0);
}
static ssize_t rfkill_claim_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	return -EOPNOTSUPP;
}
static struct device_attribute rfkill_dev_attrs[] = {
	__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
	__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
	__ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
	__ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
	__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
	__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
	__ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store),
	__ATTR(hard, S_IRUGO, rfkill_hard_show, NULL),
	__ATTR_NULL
};
static void rfkill_release(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	kfree(rfkill);
}
static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_get_type_str(rfkill->type));
	if (error)
		return error;
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	return error;
}
void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);
void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	schedule_work(&rfkill->poll_work.work);
}
EXPORT_SYMBOL(rfkill_resume_polling);
static int rfkill_suspend(struct device *dev, pm_message_t state)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill_pause_polling(rfkill);

	return 0;
}
static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	if (!rfkill->persistent) {
		cur = !!(rfkill->state & RFKILL_BLOCK_SW);
		rfkill_set_block(rfkill, cur);
	}

	rfkill_resume_polling(rfkill);

	return 0;
}
static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_attrs	= rfkill_dev_attrs,
	.dev_uevent	= rfkill_dev_uevent,
	.suspend	= rfkill_suspend,
	.resume		= rfkill_resume,
};
bool rfkill_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);
struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
		return NULL;

	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	rfkill->name = name;
	rfkill->ops = ops;
	rfkill->data = ops_data;

	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
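/*
 * Usage sketch (hypothetical driver code): the usual lifecycle is
 * alloc -> register -> ... -> unregister -> destroy; mydev, its ops
 * and the parent device are illustrative only:
 *
 *	static const struct rfkill_ops mydev_rfkill_ops = {
 *		.set_block	= mydev_set_block,
 *	};
 *
 *	dev->rfkill = rfkill_alloc("mydev-wlan", &pdev->dev,
 *				   RFKILL_TYPE_WLAN,
 *				   &mydev_rfkill_ops, dev);
 *	if (!dev->rfkill)
 *		return -ENOMEM;
 *	err = rfkill_register(dev->rfkill);
 *	if (err) {
 *		rfkill_destroy(dev->rfkill);
 *		return err;
 *	}
 *
 * On teardown, call rfkill_unregister() and then rfkill_destroy().
 */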
static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	schedule_delayed_work(&rfkill->poll_work,
		round_jiffies_relative(POLL_INTERVAL));
}
static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}
static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill;
	bool cur;

	rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);
}
int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev = &rfkill->dev;
	int error;

	BUG_ON(!rfkill);

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		schedule_delayed_work(&rfkill->poll_work,
			round_jiffies_relative(POLL_INTERVAL));

	if (!rfkill->persistent || rfkill_epo_lock_active) {
		schedule_work(&rfkill->sync_work);
	} else {
#ifdef CONFIG_RFKILL_INPUT
		bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

		if (!atomic_read(&rfkill_input_disabled))
			__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
	}

	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);
void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_send_events(rfkill, RFKILL_OP_DEL);
	list_del_init(&rfkill->node);
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);
void rfkill_destroy(struct rfkill *rfkill)
{
	if (rfkill)
		put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);
static int rfkill_fop_open(struct inode *inode, struct file *file)
{
	struct rfkill_data *data;
	struct rfkill *rfkill;
	struct rfkill_int_event *ev, *tmp;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->events);
	mutex_init(&data->mtx);
	init_waitqueue_head(&data->read_wait);

	mutex_lock(&rfkill_global_mutex);
	mutex_lock(&data->mtx);
	/*
	 * start getting events from elsewhere but hold mtx to get
	 * startup events added first
	 */

	list_for_each_entry(rfkill, &rfkill_list, node) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			goto free;
		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
		list_add_tail(&ev->list, &data->events);
	}

	list_add(&data->list, &rfkill_fds);
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);

	file->private_data = data;

	return nonseekable_open(inode, file);

 free:
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);
	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);
	kfree(data);
	return -ENOMEM;
}
static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
{
	struct rfkill_data *data = file->private_data;
	unsigned int res = POLLOUT | POLLWRNORM;

	poll_wait(file, &data->read_wait, wait);

	mutex_lock(&data->mtx);
	if (!list_empty(&data->events))
		res = POLLIN | POLLRDNORM;
	mutex_unlock(&data->mtx);

	return res;
}
static bool rfkill_readable(struct rfkill_data *data)
{
	bool r;

	mutex_lock(&data->mtx);
	r = !list_empty(&data->events);
	mutex_unlock(&data->mtx);

	return r;
}
static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev;
	unsigned long sz;
	int ret;

	mutex_lock(&data->mtx);

	while (list_empty(&data->events)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&data->mtx);
		ret = wait_event_interruptible(data->read_wait,
					       rfkill_readable(data));
		mutex_lock(&data->mtx);

		if (ret)
			goto out;
	}

	ev = list_first_entry(&data->events, struct rfkill_int_event,
				list);

	sz = min_t(unsigned long, sizeof(ev->ev), count);
	ret = sz;
	if (copy_to_user(buf, &ev->ev, sz))
		ret = -EFAULT;

	list_del(&ev->list);
	kfree(ev);
 out:
	mutex_unlock(&data->mtx);
	return ret;
}
static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct rfkill *rfkill;
	struct rfkill_event ev;

	/* we don't need the 'hard' variable but accept it */
	if (count < RFKILL_EVENT_SIZE_V1 - 1)
		return -EINVAL;

	/*
	 * Copy as much data as we can accept into our 'ev' buffer,
	 * but tell userspace how much we've copied so it can determine
	 * our API version even in a write() call, if it cares.
	 */
	count = min(count, sizeof(ev));
	if (copy_from_user(&ev, buf, count))
		return -EFAULT;

	if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
		return -EINVAL;

	if (ev.type >= NUM_RFKILL_TYPES)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);

	if (ev.op == RFKILL_OP_CHANGE_ALL) {
		if (ev.type == RFKILL_TYPE_ALL) {
			enum rfkill_type i;
			for (i = 0; i < NUM_RFKILL_TYPES; i++)
				rfkill_global_states[i].cur = ev.soft;
		} else {
			rfkill_global_states[ev.type].cur = ev.soft;
		}
	}

	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
			continue;

		if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, ev.soft);
	}
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
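/*
 * Userspace sketch (not kernel code): soft-unblocking all WLAN
 * radios via /dev/rfkill is a single structured write:
 *
 *	struct rfkill_event ev = {
 *		.op	= RFKILL_OP_CHANGE_ALL,
 *		.type	= RFKILL_TYPE_WLAN,
 *		.soft	= 0,
 *	};
 *	int fd = open("/dev/rfkill", O_RDWR);
 *	write(fd, &ev, sizeof(ev));
 *
 * Error handling is omitted; ev.idx is ignored for CHANGE_ALL.
 */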
static int rfkill_fop_release(struct inode *inode, struct file *file)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev, *tmp;

	mutex_lock(&rfkill_global_mutex);
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);

	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
	if (data->input_handler)
		if (atomic_dec_return(&rfkill_input_disabled) == 0)
			printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

	kfree(data);

	return 0;
}
#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
		return -ENOSYS;

	if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
		return -ENOSYS;

	mutex_lock(&data->mtx);

	if (!data->input_handler) {
		if (atomic_inc_return(&rfkill_input_disabled) == 1)
			printk(KERN_DEBUG "rfkill: input handler disabled\n");
		data->input_handler = true;
	}

	mutex_unlock(&data->mtx);

	return 0;
}
#endif
static const struct file_operations rfkill_fops = {
	.owner		= THIS_MODULE,
	.open		= rfkill_fop_open,
	.read		= rfkill_fop_read,
	.write		= rfkill_fop_write,
	.poll		= rfkill_fop_poll,
	.release	= rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
	.unlocked_ioctl	= rfkill_fop_ioctl,
	.compat_ioctl	= rfkill_fop_ioctl,
#endif
	.llseek		= no_llseek,
};

static struct miscdevice rfkill_miscdev = {
	.name	= "rfkill",
	.fops	= &rfkill_fops,
	.minor	= MISC_DYNAMIC_MINOR,
};
static int __init rfkill_init(void)
{
	int error;
	int i;

	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		rfkill_global_states[i].cur = !rfkill_default_state;

	error = class_register(&rfkill_class);
	if (error)
		goto out;

	error = misc_register(&rfkill_miscdev);
	if (error) {
		class_unregister(&rfkill_class);
		goto out;
	}

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error) {
		misc_deregister(&rfkill_miscdev);
		class_unregister(&rfkill_class);
		goto out;
	}
#endif

 out:
	return error;
}
subsys_initcall(rfkill_init);
static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
	rfkill_handler_exit();
#endif
	misc_deregister(&rfkill_miscdev);
	class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);