/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL		(5 * HZ)

#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)

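/*
 * How these bits combine, as used below: _HW and _SW are the current
 * hard and soft block states, _SW_PREV saves the soft state while an
 * ops->set_block() call is in flight (signalled by _SETCALL), and
 * RFKILL_BLOCK_ANY covers "blocked for any reason" checks.
 */
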
struct rfkill {
	spinlock_t		lock;

	const char		*name;
	enum rfkill_type	type;

	unsigned long		state;

	u32			idx;

	bool			registered;
	bool			persistent;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
};

#define to_rfkill(d)	container_of(d, struct rfkill, dev)

struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event	ev;
};

struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
};

94 MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
95 MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
96 MODULE_DESCRIPTION("RF switch support");
97 MODULE_LICENSE("GPL");
/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */

static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);	/* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

static struct {
	bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;

#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

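/*
 * A driver that wants its LED trigger to have a stable name (rather
 * than the default dev_name()-based one chosen below) would call this
 * before rfkill_register(); e.g., with a hypothetical "foo" driver:
 *
 *	rfkill_set_led_trigger_name(rfkill, "foo-wlan");
 */
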
static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}

#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}

static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

static bool __rfkill_set_hw_state(struct rfkill *rfkill,
				  bool blocked, bool *change)
{
	unsigned long flags;
	bool prev, any;

	BUG_ON(!change);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	*change = prev != blocked;
	any = rfkill->state & RFKILL_BLOCK_ANY;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);

	return any;
}

/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	int err;

	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	/*
	 * Some platforms (...!) generate input events which affect the
	 * _hard_ kill state -- whenever something tries to change the
	 * current software state query the hardware state too.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (rfkill->state & RFKILL_BLOCK_SW)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * Failed -- reset status to _prev, this may be different
		 * from what we set _PREV to earlier in this function
		 * if rfkill_set_sw_state was invoked.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_event(rfkill);
}

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is claimed by userspace (in which case,
 * that switch is left alone) or suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_global_states[type].cur = blocked;
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}

/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global states saved by
 * rfkill_epo() in rfkill_global_states[]. This can undo the effects
 * of a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes, when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true when EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * switch type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif

bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
	bool ret, change;

	ret = __rfkill_set_hw_state(rfkill, blocked, &change);

	if (!rfkill->registered)
		return ret;

	if (change)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);

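/*
 * Typical driver usage (a sketch with hypothetical "foo" helpers),
 * e.g. from an interrupt or ops->poll handler that has just read a
 * hard-kill GPIO:
 *
 *	bool blocked = foo_read_killswitch_gpio(priv);
 *	rfkill_set_hw_state(priv->rfkill, blocked);
 *
 * The return value tells the driver whether the radio should be off
 * for any reason (hard or soft block).
 */
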
static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

	/* if in a ops->set_block right now, use other bit */
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);

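/*
 * A driver whose firmware reports an asynchronous soft-block change
 * (say, a wireless button handled in firmware) might, hypothetically,
 * call:
 *
 *	rfkill_set_sw_state(priv->rfkill, fw_reported_blocked);
 *
 * Like rfkill_set_hw_state(), the return value is the combined
 * blocked state of the radio.
 */
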
void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;

	BUG_ON(!rfkill);
	BUG_ON(rfkill->registered);

	spin_lock_irqsave(&rfkill->lock, flags);
	__rfkill_set_sw_state(rfkill, blocked);
	rfkill->persistent = true;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);
	if (hw)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (swprev != sw || hwprev != hw)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
	}
}
EXPORT_SYMBOL(rfkill_set_states);

static ssize_t rfkill_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill->name);
}

static const char *rfkill_get_type_str(enum rfkill_type type)
{
	BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);

	switch (type) {
	case RFKILL_TYPE_WLAN:
		return "wlan";
	case RFKILL_TYPE_BLUETOOTH:
		return "bluetooth";
	case RFKILL_TYPE_UWB:
		return "ultrawideband";
	case RFKILL_TYPE_WIMAX:
		return "wimax";
	case RFKILL_TYPE_WWAN:
		return "wwan";
	case RFKILL_TYPE_GPS:
		return "gps";
	case RFKILL_TYPE_FM:
		return "fm";
	default:
		BUG();
	}
}

static ssize_t rfkill_type_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}

static ssize_t rfkill_idx_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->idx);
}

static ssize_t rfkill_persistent_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->persistent);
}

static ssize_t rfkill_hard_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}

static ssize_t rfkill_soft_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t rfkill_soft_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = strict_strtoul(buf, 0, &state);
	if (err)
		return err;

	if (state > 1)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state);
	mutex_unlock(&rfkill_global_mutex);

	return count;
}

static u8 user_state_from_blocked(unsigned long state)
{
	if (state & RFKILL_BLOCK_HW)
		return RFKILL_USER_STATE_HARD_BLOCKED;
	if (state & RFKILL_BLOCK_SW)
		return RFKILL_USER_STATE_SOFT_BLOCKED;

	return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t rfkill_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t rfkill_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = strict_strtoul(buf, 0, &state);
	if (err)
		return err;

	if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
	    state != RFKILL_USER_STATE_UNBLOCKED)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
	mutex_unlock(&rfkill_global_mutex);

	return count;
}

static ssize_t rfkill_claim_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t rfkill_claim_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	return -EOPNOTSUPP;
}

static struct device_attribute rfkill_dev_attrs[] = {
	__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
	__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
	__ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
	__ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
	__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
	__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
	__ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store),
	__ATTR(hard, S_IRUGO, rfkill_hard_show, NULL),
	__ATTR_NULL
};

static void rfkill_release(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_get_type_str(rfkill->type));
	if (error)
		return error;
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	return error;
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	schedule_work(&rfkill->poll_work.work);
}
EXPORT_SYMBOL(rfkill_resume_polling);

static int rfkill_suspend(struct device *dev, pm_message_t state)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill_pause_polling(rfkill);

	return 0;
}

static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	if (!rfkill->persistent) {
		cur = !!(rfkill->state & RFKILL_BLOCK_SW);
		rfkill_set_block(rfkill, cur);
	}

	rfkill_resume_polling(rfkill);

	return 0;
}

static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_attrs	= rfkill_dev_attrs,
	.dev_uevent	= rfkill_dev_uevent,
	.suspend	= rfkill_suspend,
	.resume		= rfkill_resume,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);

struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
		return NULL;

	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	rfkill->name = name;
	rfkill->ops = ops;
	rfkill->data = ops_data;

	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);

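/*
 * Putting it together, a hypothetical driver "foo" would allocate and
 * register its switch roughly like this (error handling abbreviated):
 *
 *	priv->rfkill = rfkill_alloc("foo-wlan", &pdev->dev,
 *				    RFKILL_TYPE_WLAN, &foo_rfkill_ops, priv);
 *	if (!priv->rfkill)
 *		return -ENOMEM;
 *	err = rfkill_register(priv->rfkill);
 *	if (err) {
 *		rfkill_destroy(priv->rfkill);
 *		return err;
 *	}
 */
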
static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	schedule_delayed_work(&rfkill->poll_work,
		round_jiffies_relative(POLL_INTERVAL));
}

static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill;
	bool cur;

	rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev = &rfkill->dev;
	int error;

	BUG_ON(!rfkill);

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		schedule_delayed_work(&rfkill->poll_work,
			round_jiffies_relative(POLL_INTERVAL));

	if (!rfkill->persistent || rfkill_epo_lock_active) {
		schedule_work(&rfkill->sync_work);
	} else {
#ifdef CONFIG_RFKILL_INPUT
		bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

		if (!atomic_read(&rfkill_input_disabled))
			__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
	}

	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_send_events(rfkill, RFKILL_OP_DEL);
	list_del_init(&rfkill->node);
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
	if (rfkill)
		put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);

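/*
 * Note the teardown order implied above: a registered switch must be
 * rfkill_unregister()ed before rfkill_destroy() drops the last device
 * reference; rfkill_destroy() alone is only correct for a switch that
 * was allocated but never successfully registered.
 */
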
static int rfkill_fop_open(struct inode *inode, struct file *file)
{
	struct rfkill_data *data;
	struct rfkill *rfkill;
	struct rfkill_int_event *ev, *tmp;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->events);
	mutex_init(&data->mtx);
	init_waitqueue_head(&data->read_wait);

	mutex_lock(&rfkill_global_mutex);
	mutex_lock(&data->mtx);
	/*
	 * start getting events from elsewhere but hold mtx to get
	 * startup events added first
	 */
	list_add(&data->list, &rfkill_fds);
	list_for_each_entry(rfkill, &rfkill_list, node) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			goto free;
		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
		list_add_tail(&ev->list, &data->events);
	}
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);

	file->private_data = data;

	return nonseekable_open(inode, file);

 free:
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);
	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);
	kfree(data);
	return -ENOMEM;
}

static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
{
	struct rfkill_data *data = file->private_data;
	unsigned int res = POLLOUT | POLLWRNORM;

	poll_wait(file, &data->read_wait, wait);

	mutex_lock(&data->mtx);
	if (!list_empty(&data->events))
		res = POLLIN | POLLRDNORM;
	mutex_unlock(&data->mtx);

	return res;
}

static bool rfkill_readable(struct rfkill_data *data)
{
	bool r;

	mutex_lock(&data->mtx);
	r = !list_empty(&data->events);
	mutex_unlock(&data->mtx);

	return r;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev;
	unsigned long sz;
	int ret;

	mutex_lock(&data->mtx);

	while (list_empty(&data->events)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&data->mtx);
		ret = wait_event_interruptible(data->read_wait,
					       rfkill_readable(data));
		mutex_lock(&data->mtx);

		if (ret)
			goto out;
	}

	ev = list_first_entry(&data->events, struct rfkill_int_event,
				list);

	sz = min_t(unsigned long, sizeof(ev->ev), count);
	ret = sz;
	if (copy_to_user(buf, &ev->ev, sz))
		ret = -EFAULT;

	list_del(&ev->list);
	kfree(ev);
 out:
	mutex_unlock(&data->mtx);
	return ret;
}

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct rfkill *rfkill;
	struct rfkill_event ev;

	/* we don't need the 'hard' variable but accept it */
	if (count < RFKILL_EVENT_SIZE_V1 - 1)
		return -EINVAL;

	/*
	 * Copy as much data as we can accept into our 'ev' buffer,
	 * but tell userspace how much we've copied so it can determine
	 * our API version even in a write() call, if it cares.
	 */
	count = min(count, sizeof(ev));
	if (copy_from_user(&ev, buf, count))
		return -EFAULT;

	if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
		return -EINVAL;

	if (ev.type >= NUM_RFKILL_TYPES)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);

	if (ev.op == RFKILL_OP_CHANGE_ALL) {
		if (ev.type == RFKILL_TYPE_ALL) {
			enum rfkill_type i;

			for (i = 0; i < NUM_RFKILL_TYPES; i++)
				rfkill_global_states[i].cur = ev.soft;
		} else {
			rfkill_global_states[ev.type].cur = ev.soft;
		}
	}

	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
			continue;

		if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, ev.soft);
	}
	mutex_unlock(&rfkill_global_mutex);

	return count;
}

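/*
 * From userspace, soft-blocking all WLAN radios through /dev/rfkill
 * is then a single write (a sketch, error handling omitted):
 *
 *	struct rfkill_event ev = {
 *		.op = RFKILL_OP_CHANGE_ALL,
 *		.type = RFKILL_TYPE_WLAN,
 *		.soft = 1,
 *	};
 *	int fd = open("/dev/rfkill", O_RDWR);
 *	write(fd, &ev, sizeof(ev));
 */
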
static int rfkill_fop_release(struct inode *inode, struct file *file)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev, *tmp;

	mutex_lock(&rfkill_global_mutex);
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);

	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
	if (data->input_handler)
		if (atomic_dec_return(&rfkill_input_disabled) == 0)
			printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

	kfree(data);

	return 0;
}

#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
		return -ENOSYS;

	if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
		return -ENOSYS;

	mutex_lock(&data->mtx);

	if (!data->input_handler) {
		if (atomic_inc_return(&rfkill_input_disabled) == 1)
			printk(KERN_DEBUG "rfkill: input handler disabled\n");
		data->input_handler = true;
	}

	mutex_unlock(&data->mtx);

	return 0;
}
#endif

static const struct file_operations rfkill_fops = {
	.owner		= THIS_MODULE,
	.open		= rfkill_fop_open,
	.read		= rfkill_fop_read,
	.write		= rfkill_fop_write,
	.poll		= rfkill_fop_poll,
	.release	= rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
	.unlocked_ioctl	= rfkill_fop_ioctl,
	.compat_ioctl	= rfkill_fop_ioctl,
#endif
};

static struct miscdevice rfkill_miscdev = {
	.name	= "rfkill",
	.fops	= &rfkill_fops,
	.minor	= MISC_DYNAMIC_MINOR,
};

static int __init rfkill_init(void)
{
	int error;
	int i;

	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		rfkill_global_states[i].cur = !rfkill_default_state;

	error = class_register(&rfkill_class);
	if (error)
		goto out;

	error = misc_register(&rfkill_miscdev);
	if (error) {
		class_unregister(&rfkill_class);
		goto out;
	}

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error) {
		misc_deregister(&rfkill_miscdev);
		class_unregister(&rfkill_class);
		goto out;
	}
#endif

 out:
	return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
	rfkill_handler_exit();
#endif
	misc_deregister(&rfkill_miscdev);
	class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);