cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

core.c (33382B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL		(5 * HZ)

#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)
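
/*
 * A note on how the block bits above are used in this file:
 * RFKILL_BLOCK_HW is driven by rfkill_set_hw_state_reason(), and
 * RFKILL_BLOCK_SW by rfkill_set_sw_state() and rfkill_set_block().
 * While an ops->set_block() call is in flight, RFKILL_BLOCK_SW_SETCALL
 * is set and concurrent software updates land in RFKILL_BLOCK_SW_PREV,
 * so a failed call can be rolled back; see rfkill_set_block() and
 * __rfkill_set_sw_state() below. For example, a radio that is both
 * hard- and soft-blocked has
 *
 *	state == (RFKILL_BLOCK_HW | RFKILL_BLOCK_SW)
 */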

struct rfkill {
	spinlock_t		lock;

	enum rfkill_type	type;

	unsigned long		state;
	unsigned long		hard_block_reasons;

	u32			idx;

	bool			registered;
	bool			persistent;
	bool			polling_paused;
	bool			suspended;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
	char			name[];
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)

struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event_ext	ev;
};

struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
	u8			max_size;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");


/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);	/* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

static struct {
	bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;


#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static int rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);

	return 0;
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}

static struct led_trigger rfkill_any_led_trigger;
static struct led_trigger rfkill_none_led_trigger;
static struct work_struct rfkill_global_led_trigger_work;

static void rfkill_global_led_trigger_worker(struct work_struct *work)
{
	enum led_brightness brightness = LED_OFF;
	struct rfkill *rfkill;

	mutex_lock(&rfkill_global_mutex);
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (!(rfkill->state & RFKILL_BLOCK_ANY)) {
			brightness = LED_FULL;
			break;
		}
	}
	mutex_unlock(&rfkill_global_mutex);

	led_trigger_event(&rfkill_any_led_trigger, brightness);
	led_trigger_event(&rfkill_none_led_trigger,
			  brightness == LED_OFF ? LED_FULL : LED_OFF);
}

static void rfkill_global_led_trigger_event(void)
{
	schedule_work(&rfkill_global_led_trigger_work);
}

static int rfkill_global_led_trigger_register(void)
{
	int ret;

	INIT_WORK(&rfkill_global_led_trigger_work,
			rfkill_global_led_trigger_worker);

	rfkill_any_led_trigger.name = "rfkill-any";
	ret = led_trigger_register(&rfkill_any_led_trigger);
	if (ret)
		return ret;

	rfkill_none_led_trigger.name = "rfkill-none";
	ret = led_trigger_register(&rfkill_none_led_trigger);
	if (ret)
		led_trigger_unregister(&rfkill_any_led_trigger);
	else
		/* Delay activation until all global triggers are registered */
		rfkill_global_led_trigger_event();

	return ret;
}

static void rfkill_global_led_trigger_unregister(void)
{
	led_trigger_unregister(&rfkill_none_led_trigger);
	led_trigger_unregister(&rfkill_any_led_trigger);
	cancel_work_sync(&rfkill_global_led_trigger_work);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}

static void rfkill_global_led_trigger_event(void)
{
}

static int rfkill_global_led_trigger_register(void)
{
	return 0;
}

static void rfkill_global_led_trigger_unregister(void)
{
}
#endif /* CONFIG_RFKILL_LEDS */

static void rfkill_fill_event(struct rfkill_event_ext *ev,
			      struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	ev->hard_block_reasons = rfkill->hard_block_reasons;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}

static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, curr;
	int err;

	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	/*
	 * Some platforms (...!) generate input events which affect the
	 * _hard_ kill state -- whenever something tries to change the
	 * current software state, query the hardware state too.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = rfkill->state & RFKILL_BLOCK_SW;

	if (prev)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * Failed -- reset status to _PREV, which may be different
		 * from what we have set _PREV to earlier in this function
		 * if rfkill_set_sw_state was invoked.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	curr = rfkill->state & RFKILL_BLOCK_SW;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_global_led_trigger_event();

	if (prev != curr)
		rfkill_event(rfkill);
}
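
/*
 * Sketch of the driver half of the contract above (illustrative only;
 * the "mydrv" names and mydrv_radio_enable() are hypothetical, not part
 * of this file). A non-zero return from set_block makes
 * rfkill_set_block() restore the previous software state via the _PREV
 * bit:
 *
 *	static int mydrv_set_block(void *data, bool blocked)
 *	{
 *		struct mydrv *priv = data;
 *
 *		return mydrv_radio_enable(priv, !blocked);
 *	}
 */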

static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
{
	int i;

	if (type != RFKILL_TYPE_ALL) {
		rfkill_global_states[type].cur = blocked;
		return;
	}

	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		rfkill_global_states[i].cur = blocked;
}

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of the given type,
 * unless a specific switch is suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_update_global_state(type, blocked);
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type && type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}

/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices into the soft-blocked
 * state, ignoring everything in its path but rfkill_global_mutex and
 * each device's spinlock.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global state from the
 * states saved in rfkill_global_states.  This can undo the effects of
 * a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true if EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif

bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
				bool blocked, unsigned long reason)
{
	unsigned long flags;
	bool ret, prev;

	BUG_ON(!rfkill);

	if (WARN(reason &
	    ~(RFKILL_HARD_BLOCK_SIGNAL | RFKILL_HARD_BLOCK_NOT_OWNER),
	    "hw_state reason not supported: 0x%lx", reason))
		return blocked;

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->hard_block_reasons & reason);
	if (blocked) {
		rfkill->state |= RFKILL_BLOCK_HW;
		rfkill->hard_block_reasons |= reason;
	} else {
		rfkill->hard_block_reasons &= ~reason;
		if (!rfkill->hard_block_reasons)
			rfkill->state &= ~RFKILL_BLOCK_HW;
	}
	ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_global_led_trigger_event();

	if (rfkill->registered && prev != blocked)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state_reason);
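
/*
 * Illustrative caller (hypothetical driver code, not part of this file):
 * a driver that polls or takes an interrupt for a hardware kill switch
 * would report it as
 *
 *	rfkill_set_hw_state_reason(priv->rfkill, switch_asserted,
 *				   RFKILL_HARD_BLOCK_SIGNAL);
 *
 * where RFKILL_HARD_BLOCK_SIGNAL is one of the two reasons accepted by
 * the WARN() check above.
 */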

static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

	/* if in a ops->set_block right now, use other bit */
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);
	rfkill_global_led_trigger_event();

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;

	BUG_ON(!rfkill);
	BUG_ON(rfkill->registered);

	spin_lock_irqsave(&rfkill->lock, flags);
	__rfkill_set_sw_state(rfkill, blocked);
	rfkill->persistent = true;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);
	if (hw)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (swprev != sw || hwprev != hw)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
		rfkill_global_led_trigger_event();
	}
}
EXPORT_SYMBOL(rfkill_set_states);

static const char * const rfkill_types[] = {
	NULL, /* RFKILL_TYPE_ALL */
	"wlan",
	"bluetooth",
	"ultrawideband",
	"wimax",
	"wwan",
	"gps",
	"fm",
	"nfc",
};

enum rfkill_type rfkill_find_type(const char *name)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(rfkill_types) != NUM_RFKILL_TYPES);

	if (!name)
		return RFKILL_TYPE_ALL;

	for (i = 1; i < NUM_RFKILL_TYPES; i++)
		if (!strcmp(name, rfkill_types[i]))
			return i;
	return RFKILL_TYPE_ALL;
}
EXPORT_SYMBOL(rfkill_find_type);

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill_types[rfkill->type]);
}
static DEVICE_ATTR_RO(type);

static ssize_t index_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->idx);
}
static DEVICE_ATTR_RO(index);

static ssize_t persistent_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->persistent);
}
static DEVICE_ATTR_RO(persistent);

static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}
static DEVICE_ATTR_RO(hard);

static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state > 1)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state);
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
static DEVICE_ATTR_RW(soft);

static ssize_t hard_block_reasons_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "0x%lx\n", rfkill->hard_block_reasons);
}
static DEVICE_ATTR_RO(hard_block_reasons);

static u8 user_state_from_blocked(unsigned long state)
{
	if (state & RFKILL_BLOCK_HW)
		return RFKILL_USER_STATE_HARD_BLOCKED;
	if (state & RFKILL_BLOCK_SW)
		return RFKILL_USER_STATE_SOFT_BLOCKED;

	return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
	    state != RFKILL_USER_STATE_UNBLOCKED)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
static DEVICE_ATTR_RW(state);

static struct attribute *rfkill_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_type.attr,
	&dev_attr_index.attr,
	&dev_attr_persistent.attr,
	&dev_attr_state.attr,
	&dev_attr_soft.attr,
	&dev_attr_hard.attr,
	&dev_attr_hard_block_reasons.attr,
	NULL,
};
ATTRIBUTE_GROUPS(rfkill_dev);
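
/*
 * The attributes above are exposed under /sys/class/rfkill/rfkill<idx>/
 * (see dev_set_name() in rfkill_register() below). Reading "soft" and
 * "hard" yields 0 or 1; writing "1" to "soft" soft-blocks the radio,
 * requires CAP_NET_ADMIN, and is equivalent to an RFKILL_OP_CHANGE
 * write on /dev/rfkill.
 */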

static void rfkill_release(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	unsigned long reasons;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_types[rfkill->type]);
	if (error)
		return error;
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	reasons = rfkill->hard_block_reasons;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	if (error)
		return error;
	return add_uevent_var(env, "RFKILL_HW_BLOCK_REASON=0x%lx", reasons);
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	rfkill->polling_paused = true;
	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	rfkill->polling_paused = false;

	if (rfkill->suspended)
		return;

	queue_delayed_work(system_power_efficient_wq,
			   &rfkill->poll_work, 0);
}
EXPORT_SYMBOL(rfkill_resume_polling);

#ifdef CONFIG_PM_SLEEP
static int rfkill_suspend(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill->suspended = true;
	cancel_delayed_work_sync(&rfkill->poll_work);

	return 0;
}

static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	rfkill->suspended = false;

	if (!rfkill->registered)
		return 0;

	if (!rfkill->persistent) {
		cur = !!(rfkill->state & RFKILL_BLOCK_SW);
		rfkill_set_block(rfkill, cur);
	}

	if (rfkill->ops->poll && !rfkill->polling_paused)
		queue_delayed_work(system_power_efficient_wq,
				   &rfkill->poll_work, 0);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
#define RFKILL_PM_OPS (&rfkill_pm_ops)
#else
#define RFKILL_PM_OPS NULL
#endif

static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_groups	= rfkill_dev_groups,
	.dev_uevent	= rfkill_dev_uevent,
	.pm		= RFKILL_PM_OPS,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);

bool rfkill_soft_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_SW);
}
EXPORT_SYMBOL(rfkill_soft_blocked);

struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
		return NULL;

	rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	strcpy(rfkill->name, name);
	rfkill->ops = ops;
	rfkill->data = ops_data;

	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);

static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	queue_delayed_work(system_power_efficient_wq,
		&rfkill->poll_work,
		round_jiffies_relative(POLL_INTERVAL));
}

static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill;
	bool cur;

	rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev;
	int error;

	if (!rfkill)
		return -EINVAL;

	dev = &rfkill->dev;

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		queue_delayed_work(system_power_efficient_wq,
			&rfkill->poll_work,
			round_jiffies_relative(POLL_INTERVAL));

	if (!rfkill->persistent || rfkill_epo_lock_active) {
		schedule_work(&rfkill->sync_work);
	} else {
#ifdef CONFIG_RFKILL_INPUT
		bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

		if (!atomic_read(&rfkill_input_disabled))
			__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
	}

	rfkill_global_led_trigger_event();
	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);
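
/*
 * Typical driver usage of the API above, as a minimal sketch (the
 * "mydrv" names are hypothetical):
 *
 *	static const struct rfkill_ops mydrv_rfkill_ops = {
 *		.set_block = mydrv_set_block,
 *	};
 *
 *	priv->rfkill = rfkill_alloc("mydrv-wlan", dev, RFKILL_TYPE_WLAN,
 *				    &mydrv_rfkill_ops, priv);
 *	if (!priv->rfkill)
 *		return -ENOMEM;
 *
 *	err = rfkill_register(priv->rfkill);
 *	if (err) {
 *		rfkill_destroy(priv->rfkill);
 *		return err;
 *	}
 *
 * and on teardown, rfkill_unregister() followed by rfkill_destroy()
 * (both below).
 */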

void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_send_events(rfkill, RFKILL_OP_DEL);
	list_del_init(&rfkill->node);
	rfkill_global_led_trigger_event();
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
	if (rfkill)
		put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
	struct rfkill_data *data;
	struct rfkill *rfkill;
	struct rfkill_int_event *ev, *tmp;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->max_size = RFKILL_EVENT_SIZE_V1;

	INIT_LIST_HEAD(&data->events);
	mutex_init(&data->mtx);
	init_waitqueue_head(&data->read_wait);

	mutex_lock(&rfkill_global_mutex);
	mutex_lock(&data->mtx);
	/*
	 * start getting events from elsewhere but hold mtx to get
	 * startup events added first
	 */

	list_for_each_entry(rfkill, &rfkill_list, node) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			goto free;
		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
		list_add_tail(&ev->list, &data->events);
	}
	list_add(&data->list, &rfkill_fds);
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);

	file->private_data = data;

	return stream_open(inode, file);

 free:
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);
	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);
	kfree(data);
	return -ENOMEM;
}

static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait)
{
	struct rfkill_data *data = file->private_data;
	__poll_t res = EPOLLOUT | EPOLLWRNORM;

	poll_wait(file, &data->read_wait, wait);

	mutex_lock(&data->mtx);
	if (!list_empty(&data->events))
		res = EPOLLIN | EPOLLRDNORM;
	mutex_unlock(&data->mtx);

	return res;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev;
	unsigned long sz;
	int ret;

	mutex_lock(&data->mtx);

	while (list_empty(&data->events)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&data->mtx);
		/* since we re-check and it just compares pointers,
		 * using !list_empty() without locking isn't a problem
		 */
		ret = wait_event_interruptible(data->read_wait,
					       !list_empty(&data->events));
		mutex_lock(&data->mtx);

		if (ret)
			goto out;
	}

	ev = list_first_entry(&data->events, struct rfkill_int_event,
				list);

	sz = min_t(unsigned long, sizeof(ev->ev), count);
	sz = min_t(unsigned long, sz, data->max_size);
	ret = sz;
	if (copy_to_user(buf, &ev->ev, sz))
		ret = -EFAULT;

	list_del(&ev->list);
	kfree(ev);
 out:
	mutex_unlock(&data->mtx);
	return ret;
}

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill *rfkill;
	struct rfkill_event_ext ev;
	int ret;

	/* we don't need the 'hard' variable but accept it */
	if (count < RFKILL_EVENT_SIZE_V1 - 1)
		return -EINVAL;

	/*
	 * Copy as much data as we can accept into our 'ev' buffer,
	 * but tell userspace how much we've copied so it can determine
	 * our API version even in a write() call, if it cares.
	 */
	count = min(count, sizeof(ev));
	count = min_t(size_t, count, data->max_size);
	if (copy_from_user(&ev, buf, count))
		return -EFAULT;

	if (ev.type >= NUM_RFKILL_TYPES)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);

	switch (ev.op) {
	case RFKILL_OP_CHANGE_ALL:
		rfkill_update_global_state(ev.type, ev.soft);
		list_for_each_entry(rfkill, &rfkill_list, node)
			if (rfkill->type == ev.type ||
			    ev.type == RFKILL_TYPE_ALL)
				rfkill_set_block(rfkill, ev.soft);
		ret = 0;
		break;
	case RFKILL_OP_CHANGE:
		list_for_each_entry(rfkill, &rfkill_list, node)
			if (rfkill->idx == ev.idx &&
			    (rfkill->type == ev.type ||
			     ev.type == RFKILL_TYPE_ALL))
				rfkill_set_block(rfkill, ev.soft);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&rfkill_global_mutex);

	return ret ?: count;
}
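
/*
 * From userspace, the write() handler above is driven with a struct
 * rfkill_event (struct rfkill_event_ext in newer uapi headers). An
 * illustrative sketch that soft-blocks all bluetooth radios, error
 * handling omitted:
 *
 *	struct rfkill_event ev = {
 *		.op   = RFKILL_OP_CHANGE_ALL,
 *		.type = RFKILL_TYPE_BLUETOOTH,
 *		.soft = 1,
 *	};
 *	int fd = open("/dev/rfkill", O_RDWR);
 *
 *	write(fd, &ev, sizeof(ev));
 */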

static int rfkill_fop_release(struct inode *inode, struct file *file)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev, *tmp;

	mutex_lock(&rfkill_global_mutex);
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);

	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
	if (data->input_handler)
		if (atomic_dec_return(&rfkill_input_disabled) == 0)
			printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

	kfree(data);

	return 0;
}

static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;
	int ret = -ENOSYS;
	u32 size;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
		return -ENOSYS;

	mutex_lock(&data->mtx);
	switch (_IOC_NR(cmd)) {
#ifdef CONFIG_RFKILL_INPUT
	case RFKILL_IOC_NOINPUT:
		if (!data->input_handler) {
			if (atomic_inc_return(&rfkill_input_disabled) == 1)
				printk(KERN_DEBUG "rfkill: input handler disabled\n");
			data->input_handler = true;
		}
		ret = 0;
		break;
#endif
	case RFKILL_IOC_MAX_SIZE:
		if (get_user(size, (__u32 __user *)arg)) {
			ret = -EFAULT;
			break;
		}
		if (size < RFKILL_EVENT_SIZE_V1 || size > U8_MAX) {
			ret = -EINVAL;
			break;
		}
		data->max_size = size;
		ret = 0;
		break;
	default:
		break;
	}
	mutex_unlock(&data->mtx);

	return ret;
}

static const struct file_operations rfkill_fops = {
	.owner		= THIS_MODULE,
	.open		= rfkill_fop_open,
	.read		= rfkill_fop_read,
	.write		= rfkill_fop_write,
	.poll		= rfkill_fop_poll,
	.release	= rfkill_fop_release,
	.unlocked_ioctl	= rfkill_fop_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.llseek		= no_llseek,
};

#define RFKILL_NAME "rfkill"

static struct miscdevice rfkill_miscdev = {
	.fops	= &rfkill_fops,
	.name	= RFKILL_NAME,
	.minor	= RFKILL_MINOR,
};

static int __init rfkill_init(void)
{
	int error;

	rfkill_update_global_state(RFKILL_TYPE_ALL, !rfkill_default_state);

	error = class_register(&rfkill_class);
	if (error)
		goto error_class;

	error = misc_register(&rfkill_miscdev);
	if (error)
		goto error_misc;

	error = rfkill_global_led_trigger_register();
	if (error)
		goto error_led_trigger;

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error)
		goto error_input;
#endif

	return 0;

#ifdef CONFIG_RFKILL_INPUT
error_input:
	rfkill_global_led_trigger_unregister();
#endif
error_led_trigger:
	misc_deregister(&rfkill_miscdev);
error_misc:
	class_unregister(&rfkill_class);
error_class:
	return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
	rfkill_handler_exit();
#endif
	rfkill_global_led_trigger_unregister();
	misc_deregister(&rfkill_miscdev);
	class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);

MODULE_ALIAS_MISCDEV(RFKILL_MINOR);
MODULE_ALIAS("devname:" RFKILL_NAME);