Lines Matching defs:rfkill
15 #include <linux/rfkill.h>
25 #include "rfkill.h"
37 struct rfkill {
69 #define to_rfkill(d) container_of(d, struct rfkill, dev)
95 * the rfkill struct under their own lock, and take this lock during
96 * rfkill method calls -- which will cause an AB-BA deadlock situation.
105 static LIST_HEAD(rfkill_fds); /* list of open fds of /dev/rfkill */
120 static void rfkill_led_trigger_event(struct rfkill *rfkill)
124 if (!rfkill->registered)
127 trigger = &rfkill->led_trigger;
129 if (rfkill->state & RFKILL_BLOCK_ANY)
137 struct rfkill *rfkill;
139 rfkill = container_of(led->trigger, struct rfkill, led_trigger);
141 rfkill_led_trigger_event(rfkill);
146 const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
148 return rfkill->led_trigger.name;
152 void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
154 BUG_ON(!rfkill);
156 rfkill->ledtrigname = name;
160 static int rfkill_led_trigger_register(struct rfkill *rfkill)
162 rfkill->led_trigger.name = rfkill->ledtrigname
163 ? : dev_name(&rfkill->dev);
164 rfkill->led_trigger.activate = rfkill_led_trigger_activate;
165 return led_trigger_register(&rfkill->led_trigger);
168 static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
170 led_trigger_unregister(&rfkill->led_trigger);
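The LED trigger name defaults to the rfkill device name (lines 162-163) unless the driver overrides it before registration. A minimal sketch, assuming a hypothetical driver that wants a stable trigger name for its LED bindings:

	/* Hypothetical driver snippet: name the trigger before
	 * rfkill_register() so an LED can bind to it through
	 * /sys/class/leds/<led>/trigger. */
	rfkill_set_led_trigger_name(priv->rfkill, "acme-wlan");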
180 struct rfkill *rfkill;
183 list_for_each_entry(rfkill, &rfkill_list, node) {
184 if (!(rfkill->state & RFKILL_BLOCK_ANY)) {
208 rfkill_any_led_trigger.name = "rfkill-any";
213 rfkill_none_led_trigger.name = "rfkill-none";
231 static void rfkill_led_trigger_event(struct rfkill *rfkill)
235 static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
240 static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
259 struct rfkill *rfkill,
264 ev->idx = rfkill->idx;
265 ev->type = rfkill->type;
268 spin_lock_irqsave(&rfkill->lock, flags);
269 ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
270 ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
272 ev->hard_block_reasons = rfkill->hard_block_reasons;
273 spin_unlock_irqrestore(&rfkill->lock, flags);
276 static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
285 rfkill_fill_event(&ev->ev, rfkill, op);
293 static void rfkill_event(struct rfkill *rfkill)
295 if (!rfkill->registered)
298 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
300 /* also send event to /dev/rfkill */
301 rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
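rfkill_fill_event() is what userspace ultimately sees when it reads /dev/rfkill: one event per read(). A minimal userspace sketch (not part of this file) that dumps the initial RFKILL_OP_ADD events and then blocks waiting for changes, using only the uapi header:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <linux/rfkill.h>

	int main(void)
	{
		struct rfkill_event ev;
		int fd = open("/dev/rfkill", O_RDONLY);

		if (fd < 0)
			return 1;
		/* On open, the core queues one ADD event per registered
		 * device; each event is truncated to the size the caller
		 * asked for, so old binaries keep working as the struct
		 * grows (e.g. hard_block_reasons in rfkill_event_ext). */
		while (read(fd, &ev, sizeof(ev)) >= RFKILL_EVENT_SIZE_V1)
			printf("idx=%u type=%u op=%u soft=%u hard=%u\n",
			       ev.idx, ev.type, ev.op, ev.soft, ev.hard);
		close(fd);
		return 0;
	}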
307 * @rfkill: the rfkill struct to use
313 static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
319 if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
327 if (rfkill->ops->query)
328 rfkill->ops->query(rfkill, rfkill->data);
330 spin_lock_irqsave(&rfkill->lock, flags);
331 prev = rfkill->state & RFKILL_BLOCK_SW;
334 rfkill->state |= RFKILL_BLOCK_SW_PREV;
336 rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
339 rfkill->state |= RFKILL_BLOCK_SW;
341 rfkill->state &= ~RFKILL_BLOCK_SW;
343 rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
344 spin_unlock_irqrestore(&rfkill->lock, flags);
346 err = rfkill->ops->set_block(rfkill->data, blocked);
348 spin_lock_irqsave(&rfkill->lock, flags);
355 if (rfkill->state & RFKILL_BLOCK_SW_PREV)
356 rfkill->state |= RFKILL_BLOCK_SW;
358 rfkill->state &= ~RFKILL_BLOCK_SW;
360 rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
361 rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
362 curr = rfkill->state & RFKILL_BLOCK_SW;
363 spin_unlock_irqrestore(&rfkill->lock, flags);
365 rfkill_led_trigger_event(rfkill);
369 rfkill_event(rfkill);
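The SW_PREV/SETCALL bookkeeping above lets rfkill_set_sw_state() run concurrently with the driver callback without losing updates, and on a set_block error the core falls back to the _PREV bit. The driver side of this contract is small; a sketch with hypothetical acme_* names:

	/* Hypothetical set_block callback: data is the ops_data pointer
	 * handed to rfkill_alloc().  Return 0 on success or -errno; on
	 * failure the core restores the previous soft-block state. */
	static int acme_rfkill_set_block(void *data, bool blocked)
	{
		struct acme_priv *priv = data;

		return acme_radio_power(priv, !blocked);
	}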
372 static void rfkill_sync(struct rfkill *rfkill)
376 if (!rfkill->need_sync)
379 rfkill_set_block(rfkill, rfkill_global_states[rfkill->type].cur);
380 rfkill->need_sync = false;
411 struct rfkill *rfkill;
414 list_for_each_entry(rfkill, &rfkill_list, node) {
415 if (rfkill->type != type && type != RFKILL_TYPE_ALL)
418 rfkill_set_block(rfkill, blocked);
448 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
449 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
456 struct rfkill *rfkill;
465 list_for_each_entry(rfkill, &rfkill_list, node)
466 rfkill_set_block(rfkill, true);
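501 * Used by rfkill-input to manually unlock state changes, when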
541 bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
547 BUG_ON(!rfkill);
554 spin_lock_irqsave(&rfkill->lock, flags);
555 prev = !!(rfkill->hard_block_reasons & reason);
557 rfkill->state |= RFKILL_BLOCK_HW;
558 rfkill->hard_block_reasons |= reason;
560 rfkill->hard_block_reasons &= ~reason;
561 if (!rfkill->hard_block_reasons)
562 rfkill->state &= ~RFKILL_BLOCK_HW;
564 ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
565 spin_unlock_irqrestore(&rfkill->lock, flags);
567 rfkill_led_trigger_event(rfkill);
570 if (rfkill->registered && prev != blocked)
571 schedule_work(&rfkill->uevent_work);
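Drivers report hardware kill switches through this helper; the rfkill_set_hw_state() wrapper in <linux/rfkill.h> passes RFKILL_HARD_BLOCK_SIGNAL for them. A sketch, assuming a hypothetical kill-switch GPIO that can be read from hard-IRQ context:

	/* Hypothetical kill-switch ISR: reasons from all callers are
	 * ORed together (lines 557-562), and RFKILL_BLOCK_HW is only
	 * cleared once the last reason is gone. */
	static irqreturn_t acme_killswitch_irq(int irq, void *data)
	{
		struct acme_priv *priv = data;
		bool blocked = gpiod_get_value(priv->kill_gpio);

		rfkill_set_hw_state_reason(priv->rfkill, blocked,
					   RFKILL_HARD_BLOCK_SIGNAL);
		return IRQ_HANDLED;
	}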
577 static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
582 if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
586 rfkill->state |= bit;
588 rfkill->state &= ~bit;
591 bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
596 BUG_ON(!rfkill);
598 spin_lock_irqsave(&rfkill->lock, flags);
599 prev = !!(rfkill->state & RFKILL_BLOCK_SW);
600 __rfkill_set_sw_state(rfkill, blocked);
601 hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
603 spin_unlock_irqrestore(&rfkill->lock, flags);
605 if (!rfkill->registered)
609 schedule_work(&rfkill->uevent_work);
611 rfkill_led_trigger_event(rfkill);
618 void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
622 BUG_ON(!rfkill);
623 BUG_ON(rfkill->registered);
625 spin_lock_irqsave(&rfkill->lock, flags);
626 __rfkill_set_sw_state(rfkill, blocked);
627 rfkill->persistent = true;
628 spin_unlock_irqrestore(&rfkill->lock, flags);
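rfkill_init_sw_state() may only be called before registration (note the BUG_ON at line 623); it also marks the device persistent, so rfkill_register() will not overwrite the state with the global default. A one-line sketch for a hypothetical probe path:

	/* Hypothetical: seed the soft state from NVRAM/firmware before
	 * rfkill_register(); this also sets rfkill->persistent. */
	rfkill_init_sw_state(priv->rfkill, acme_fw_radio_disabled(priv));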
632 void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
637 BUG_ON(!rfkill);
639 spin_lock_irqsave(&rfkill->lock, flags);
645 swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
646 hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
647 __rfkill_set_sw_state(rfkill, sw);
649 rfkill->state |= RFKILL_BLOCK_HW;
651 rfkill->state &= ~RFKILL_BLOCK_HW;
653 spin_unlock_irqrestore(&rfkill->lock, flags);
655 if (!rfkill->registered) {
656 rfkill->persistent = true;
659 schedule_work(&rfkill->uevent_work);
661 rfkill_led_trigger_event(rfkill);
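rfkill_set_states() is for hardware that reports both states in one transaction, e.g. on resume. A sketch under the same hypothetical names:

	/* Hypothetical resume path: push the soft and hard state read
	 * back from the device in a single call. */
	bool sw, hw;

	acme_read_radio_state(priv, &sw, &hw);
	rfkill_set_states(priv->rfkill, sw, hw);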
698 struct rfkill *rfkill = to_rfkill(dev);
700 return sysfs_emit(buf, "%s\n", rfkill->name);
707 struct rfkill *rfkill = to_rfkill(dev);
709 return sysfs_emit(buf, "%s\n", rfkill_types[rfkill->type]);
716 struct rfkill *rfkill = to_rfkill(dev);
718 return sysfs_emit(buf, "%d\n", rfkill->idx);
725 struct rfkill *rfkill = to_rfkill(dev);
727 return sysfs_emit(buf, "%d\n", rfkill->persistent);
734 struct rfkill *rfkill = to_rfkill(dev);
736 return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
743 struct rfkill *rfkill = to_rfkill(dev);
746 rfkill_sync(rfkill);
749 return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
755 struct rfkill *rfkill = to_rfkill(dev);
770 rfkill_sync(rfkill);
771 rfkill_set_block(rfkill, state);
782 struct rfkill *rfkill = to_rfkill(dev);
784 return sysfs_emit(buf, "0x%lx\n", rfkill->hard_block_reasons);
801 struct rfkill *rfkill = to_rfkill(dev);
804 rfkill_sync(rfkill);
807 return sysfs_emit(buf, "%d\n", user_state_from_blocked(rfkill->state));
813 struct rfkill *rfkill = to_rfkill(dev);
829 rfkill_sync(rfkill);
830 rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
852 struct rfkill *rfkill = to_rfkill(dev);
854 kfree(rfkill);
859 struct rfkill *rfkill = to_rfkill(dev);
865 error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
869 rfkill_types[rfkill->type]);
872 spin_lock_irqsave(&rfkill->lock, flags);
873 state = rfkill->state;
874 reasons = rfkill->hard_block_reasons;
875 spin_unlock_irqrestore(&rfkill->lock, flags);
883 void rfkill_pause_polling(struct rfkill *rfkill)
885 BUG_ON(!rfkill);
887 if (!rfkill->ops->poll)
890 rfkill->polling_paused = true;
891 cancel_delayed_work_sync(&rfkill->poll_work);
895 void rfkill_resume_polling(struct rfkill *rfkill)
897 BUG_ON(!rfkill);
899 if (!rfkill->ops->poll)
902 rfkill->polling_paused = false;
904 if (rfkill->suspended)
908 &rfkill->poll_work, 0);
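Polling exists for hardware whose kill switch cannot raise an interrupt; the core invokes ops->poll from a delayed work at a fixed interval, and drivers pause it around operations that would race with the read (firmware resets, suspend). A sketch of the callback:

	/* Hypothetical poll callback: runs from a workqueue, so
	 * sleeping bus I/O is fine here. */
	static void acme_rfkill_poll(struct rfkill *rfkill, void *data)
	{
		struct acme_priv *priv = data;

		rfkill_set_hw_state(rfkill, acme_read_killswitch(priv));
	}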
915 struct rfkill *rfkill = to_rfkill(dev);
917 rfkill->suspended = true;
918 cancel_delayed_work_sync(&rfkill->poll_work);
925 struct rfkill *rfkill = to_rfkill(dev);
928 rfkill->suspended = false;
930 if (!rfkill->registered)
933 if (!rfkill->persistent) {
934 cur = !!(rfkill->state & RFKILL_BLOCK_SW);
935 rfkill_set_block(rfkill, cur);
938 if (rfkill->ops->poll && !rfkill->polling_paused)
940 &rfkill->poll_work, 0);
952 .name = "rfkill",
959 bool rfkill_blocked(struct rfkill *rfkill)
964 spin_lock_irqsave(&rfkill->lock, flags);
965 state = rfkill->state;
966 spin_unlock_irqrestore(&rfkill->lock, flags);
972 bool rfkill_soft_blocked(struct rfkill *rfkill)
977 spin_lock_irqsave(&rfkill->lock, flags);
978 state = rfkill->state;
979 spin_unlock_irqrestore(&rfkill->lock, flags);
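These two accessors take the spinlock only to snapshot the state word, so they are cheap enough for hot paths. A typical guard, hedged as a sketch:

	/* Hypothetical TX path: refuse to touch the radio while any
	 * hard or soft block is active. */
	if (rfkill_blocked(priv->rfkill))
		return -ERFKILL;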
985 struct rfkill * __must_check rfkill_alloc(const char *name,
991 struct rfkill *rfkill;
1006 rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
1007 if (!rfkill)
1010 spin_lock_init(&rfkill->lock);
1011 INIT_LIST_HEAD(&rfkill->node);
1012 rfkill->type = type;
1013 strcpy(rfkill->name, name);
1014 rfkill->ops = ops;
1015 rfkill->data = ops_data;
1017 dev = &rfkill->dev;
1022 return rfkill;
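Putting the allocation and registration pieces together, a probe routine built on the hypothetical callbacks sketched above might look like this:

	static const struct rfkill_ops acme_rfkill_ops = {
		.set_block	= acme_rfkill_set_block,
		.poll		= acme_rfkill_poll,
	};

	static int acme_rfkill_probe(struct acme_priv *priv)
	{
		int err;

		priv->rfkill = rfkill_alloc("acme-wlan", priv->dev,
					    RFKILL_TYPE_WLAN,
					    &acme_rfkill_ops, priv);
		if (!priv->rfkill)
			return -ENOMEM;

		err = rfkill_register(priv->rfkill);
		if (err) {
			rfkill_destroy(priv->rfkill);
			return err;
		}
		return 0;
	}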
1028 struct rfkill *rfkill;
1030 rfkill = container_of(work, struct rfkill, poll_work.work);
1037 rfkill->ops->poll(rfkill, rfkill->data);
1040 &rfkill->poll_work,
1046 struct rfkill *rfkill;
1048 rfkill = container_of(work, struct rfkill, uevent_work);
1051 rfkill_event(rfkill);
1057 struct rfkill *rfkill = container_of(work, struct rfkill, sync_work);
1060 rfkill_sync(rfkill);
1064 int __must_check rfkill_register(struct rfkill *rfkill)
1070 if (!rfkill)
1073 dev = &rfkill->dev;
1077 if (rfkill->registered) {
1082 rfkill->idx = rfkill_no;
1083 dev_set_name(dev, "rfkill%lu", rfkill_no);
1086 list_add_tail(&rfkill->node, &rfkill_list);
1092 error = rfkill_led_trigger_register(rfkill);
1096 rfkill->registered = true;
1098 INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
1099 INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
1100 INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
1102 if (rfkill->ops->poll)
1104 &rfkill->poll_work,
1107 if (!rfkill->persistent || rfkill_epo_lock_active) {
1108 rfkill->need_sync = true;
1109 schedule_work(&rfkill->sync_work);
1112 bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);
1115 __rfkill_switch_all(rfkill->type, soft_blocked);
1120 rfkill_send_events(rfkill, RFKILL_OP_ADD);
1126 device_del(&rfkill->dev);
1128 list_del_init(&rfkill->node);
1135 void rfkill_unregister(struct rfkill *rfkill)
1137 BUG_ON(!rfkill);
1139 if (rfkill->ops->poll)
1140 cancel_delayed_work_sync(&rfkill->poll_work);
1142 cancel_work_sync(&rfkill->uevent_work);
1143 cancel_work_sync(&rfkill->sync_work);
1145 rfkill->registered = false;
1147 device_del(&rfkill->dev);
1150 rfkill_send_events(rfkill, RFKILL_OP_DEL);
1151 list_del_init(&rfkill->node);
1155 rfkill_led_trigger_unregister(rfkill);
1159 void rfkill_destroy(struct rfkill *rfkill)
1161 if (rfkill)
1162 put_device(&rfkill->dev);
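Teardown mirrors probe; a sketch:

	/* Hypothetical remove path: unregister first, then drop the
	 * last reference on the embedded struct device. */
	rfkill_unregister(priv->rfkill);
	rfkill_destroy(priv->rfkill);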
1169 struct rfkill *rfkill;
1188 list_for_each_entry(rfkill, &rfkill_list, node) {
1192 rfkill_sync(rfkill);
1193 rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
1276 struct rfkill *rfkill;
1302 list_for_each_entry(rfkill, &rfkill_list, node)
1303 if (rfkill->type == ev.type ||
1305 rfkill_set_block(rfkill, ev.soft);
1309 list_for_each_entry(rfkill, &rfkill_list, node)
1310 if (rfkill->idx == ev.idx &&
1311 (rfkill->type == ev.type ||
1313 rfkill_set_block(rfkill, ev.soft);
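The write() path accepts RFKILL_OP_CHANGE_ALL to act on every device of a type (lines 1302-1305) or RFKILL_OP_CHANGE to target a single index (lines 1309-1313). A minimal userspace sketch (again using only the uapi header) that soft-blocks all WLAN devices:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <linux/rfkill.h>

	int main(void)
	{
		struct rfkill_event ev;
		int fd = open("/dev/rfkill", O_WRONLY);

		if (fd < 0)
			return 1;
		memset(&ev, 0, sizeof(ev));
		ev.op = RFKILL_OP_CHANGE_ALL;	/* all devices of one type */
		ev.type = RFKILL_TYPE_WLAN;
		ev.soft = 1;			/* request a soft block */
		if (write(fd, &ev, sizeof(ev)) < 0) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}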
1342 printk(KERN_DEBUG "rfkill: input handler enabled\n");
1366 printk(KERN_DEBUG "rfkill: input handler disabled\n");
1404 #define RFKILL_NAME "rfkill"