Lines Matching defs:rfkill
15 #include <linux/rfkill.h>
25 #include "rfkill.h"
37 struct rfkill {
67 #define to_rfkill(d) container_of(d, struct rfkill, dev)
92 * the rfkill struct under their own lock, and take this lock during
93 * rfkill method calls -- which will cause an AB-BA deadlock situation.
102 static LIST_HEAD(rfkill_fds); /* list of open fds of /dev/rfkill */
117 static void rfkill_led_trigger_event(struct rfkill *rfkill)
121 if (!rfkill->registered)
124 trigger = &rfkill->led_trigger;
126 if (rfkill->state & RFKILL_BLOCK_ANY)
134 struct rfkill *rfkill;
136 rfkill = container_of(led->trigger, struct rfkill, led_trigger);
138 rfkill_led_trigger_event(rfkill);
143 const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
145 return rfkill->led_trigger.name;
149 void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
151 BUG_ON(!rfkill);
153 rfkill->ledtrigname = name;
157 static int rfkill_led_trigger_register(struct rfkill *rfkill)
159 rfkill->led_trigger.name = rfkill->ledtrigname
160 ? : dev_name(&rfkill->dev);
161 rfkill->led_trigger.activate = rfkill_led_trigger_activate;
162 return led_trigger_register(&rfkill->led_trigger);
165 static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
167 led_trigger_unregister(&rfkill->led_trigger);
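The LED trigger fragments above give every rfkill device its own trigger, named either by the driver or after the device. A minimal sketch (not from this file) of a driver choosing the name, where wifi_rfkill and the trigger string are illustrative:

        /* Set before rfkill_register(), which resolves the name; only the
         * pointer is stored (rfkill->ledtrigname = name above), so the
         * string must outlive the device. */
        rfkill_set_led_trigger_name(wifi_rfkill, "acme-wifi-led");
        err = rfkill_register(wifi_rfkill);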
177 struct rfkill *rfkill;
180 list_for_each_entry(rfkill, &rfkill_list, node) {
181 if (!(rfkill->state & RFKILL_BLOCK_ANY)) {
205 rfkill_any_led_trigger.name = "rfkill-any";
210 rfkill_none_led_trigger.name = "rfkill-none";
228 static void rfkill_led_trigger_event(struct rfkill *rfkill)
232 static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
237 static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
255 static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
260 ev->idx = rfkill->idx;
261 ev->type = rfkill->type;
264 spin_lock_irqsave(&rfkill->lock, flags);
265 ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
266 ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
268 spin_unlock_irqrestore(&rfkill->lock, flags);
271 static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
280 rfkill_fill_event(&ev->ev, rfkill, op);
288 static void rfkill_event(struct rfkill *rfkill)
290 if (!rfkill->registered)
293 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
295 /* also send event to /dev/rfkill */
296 rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
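rfkill_event() raises a KOBJ_CHANGE uevent and queues an RFKILL_OP_CHANGE event for every open /dev/rfkill file. A minimal userspace sketch that consumes this stream (the device node and event layout are the kernel's stable ABI; error handling is trimmed):

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>
        #include <linux/rfkill.h>

        int main(void)
        {
                struct rfkill_event ev;
                int fd = open("/dev/rfkill", O_RDONLY);

                if (fd < 0)
                        return 1;

                /* The kernel first replays an RFKILL_OP_ADD event per
                 * registered device, then blocks until state changes. */
                while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
                        printf("idx=%u type=%u op=%u soft=%u hard=%u\n",
                               ev.idx, ev.type, ev.op, ev.soft, ev.hard);

                close(fd);
                return 0;
        }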
302 * @rfkill: the rfkill struct to use
308 static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
314 if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
322 if (rfkill->ops->query)
323 rfkill->ops->query(rfkill, rfkill->data);
325 spin_lock_irqsave(&rfkill->lock, flags);
326 prev = rfkill->state & RFKILL_BLOCK_SW;
329 rfkill->state |= RFKILL_BLOCK_SW_PREV;
331 rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
334 rfkill->state |= RFKILL_BLOCK_SW;
336 rfkill->state &= ~RFKILL_BLOCK_SW;
338 rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
339 spin_unlock_irqrestore(&rfkill->lock, flags);
341 err = rfkill->ops->set_block(rfkill->data, blocked);
343 spin_lock_irqsave(&rfkill->lock, flags);
350 if (rfkill->state & RFKILL_BLOCK_SW_PREV)
351 rfkill->state |= RFKILL_BLOCK_SW;
353 rfkill->state &= ~RFKILL_BLOCK_SW;
355 rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
356 rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
357 curr = rfkill->state & RFKILL_BLOCK_SW;
358 spin_unlock_irqrestore(&rfkill->lock, flags);
360 rfkill_led_trigger_event(rfkill);
364 rfkill_event(rfkill);
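rfkill_set_block() funnels every software-block request into the driver's set_block() op, passing the opaque ops_data pointer given to rfkill_alloc(), and records the outcome in the RFKILL_BLOCK_SW* bits. A hypothetical driver-side counterpart (the acme_* names are stand-ins):

        #include <linux/rfkill.h>

        struct acme_radio;                                       /* hypothetical */
        int acme_radio_power(struct acme_radio *radio, bool on); /* hypothetical */

        static int acme_rfkill_set_block(void *data, bool blocked)
        {
                struct acme_radio *radio = data;

                /* Power the RF front end down when soft-blocked. */
                return acme_radio_power(radio, !blocked);
        }

        static const struct rfkill_ops acme_rfkill_ops = {
                .set_block = acme_rfkill_set_block,
        };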
395 struct rfkill *rfkill;
398 list_for_each_entry(rfkill, &rfkill_list, node) {
399 if (rfkill->type != type && type != RFKILL_TYPE_ALL)
402 rfkill_set_block(rfkill, blocked);
432 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
433 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
440 struct rfkill *rfkill;
449 list_for_each_entry(rfkill, &rfkill_list, node)
450 rfkill_set_block(rfkill, true);
485 * Used by rfkill-input to manually unlock state changes, when
525 bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
530 BUG_ON(!rfkill);
532 spin_lock_irqsave(&rfkill->lock, flags);
533 prev = !!(rfkill->state & RFKILL_BLOCK_HW);
535 rfkill->state |= RFKILL_BLOCK_HW;
537 rfkill->state &= ~RFKILL_BLOCK_HW;
538 ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
539 spin_unlock_irqrestore(&rfkill->lock, flags);
541 rfkill_led_trigger_event(rfkill);
544 if (rfkill->registered && prev != blocked)
545 schedule_work(&rfkill->uevent_work);
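rfkill_set_hw_state() is the driver-facing report of a hardware kill switch; per the RFKILL_BLOCK_ANY line above it returns the combined blocked state, and the core schedules the uevent itself when the state changes. An illustrative poll() handler, reusing the hypothetical acme_* helpers from the previous sketch:

        static void acme_rfkill_poll(struct rfkill *rfkill, void *data)
        {
                struct acme_radio *radio = data;
                bool blocked = acme_read_switch(radio);   /* hypothetical */

                /* A true return means the radio is blocked for any reason
                 * and must stay powered off. */
                if (rfkill_set_hw_state(rfkill, blocked))
                        acme_radio_power(radio, false);
        }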
551 static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
556 if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
560 rfkill->state |= bit;
562 rfkill->state &= ~bit;
565 bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
570 BUG_ON(!rfkill);
572 spin_lock_irqsave(&rfkill->lock, flags);
573 prev = !!(rfkill->state & RFKILL_BLOCK_SW);
574 __rfkill_set_sw_state(rfkill, blocked);
575 hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
577 spin_unlock_irqrestore(&rfkill->lock, flags);
579 if (!rfkill->registered)
583 schedule_work(&rfkill->uevent_work);
585 rfkill_led_trigger_event(rfkill);
592 void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
596 BUG_ON(!rfkill);
597 BUG_ON(rfkill->registered);
599 spin_lock_irqsave(&rfkill->lock, flags);
600 __rfkill_set_sw_state(rfkill, blocked);
601 rfkill->persistent = true;
602 spin_unlock_irqrestore(&rfkill->lock, flags);
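Because of the BUG_ON(rfkill->registered) above, rfkill_init_sw_state() is strictly a pre-registration call; it also sets rfkill->persistent, so registration keeps this state instead of applying the global default. A probe-time sketch with a hypothetical helper:

        /* acme_fw_radio_disabled() stands in for however the driver reads
         * the soft-block setting its firmware preserved across reboots.
         * Must run before rfkill_register(). */
        rfkill_init_sw_state(wifi_rfkill, acme_fw_radio_disabled(radio));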
606 void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
611 BUG_ON(!rfkill);
613 spin_lock_irqsave(&rfkill->lock, flags);
619 swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
620 hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
621 __rfkill_set_sw_state(rfkill, sw);
623 rfkill->state |= RFKILL_BLOCK_HW;
625 rfkill->state &= ~RFKILL_BLOCK_HW;
627 spin_unlock_irqrestore(&rfkill->lock, flags);
629 if (!rfkill->registered) {
630 rfkill->persistent = true;
633 schedule_work(&rfkill->uevent_work);
635 rfkill_led_trigger_event(rfkill);
672 struct rfkill *rfkill = to_rfkill(dev);
674 return sprintf(buf, "%s\n", rfkill->name);
681 struct rfkill *rfkill = to_rfkill(dev);
683 return sprintf(buf, "%s\n", rfkill_types[rfkill->type]);
690 struct rfkill *rfkill = to_rfkill(dev);
692 return sprintf(buf, "%d\n", rfkill->idx);
699 struct rfkill *rfkill = to_rfkill(dev);
701 return sprintf(buf, "%d\n", rfkill->persistent);
708 struct rfkill *rfkill = to_rfkill(dev);
710 return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
717 struct rfkill *rfkill = to_rfkill(dev);
719 return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
725 struct rfkill *rfkill = to_rfkill(dev);
740 rfkill_set_block(rfkill, state);
760 struct rfkill *rfkill = to_rfkill(dev);
762 return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
768 struct rfkill *rfkill = to_rfkill(dev);
784 rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
805 struct rfkill *rfkill = to_rfkill(dev);
807 kfree(rfkill);
812 struct rfkill *rfkill = to_rfkill(dev);
817 error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
821 rfkill_types[rfkill->type]);
824 spin_lock_irqsave(&rfkill->lock, flags);
825 state = rfkill->state;
826 spin_unlock_irqrestore(&rfkill->lock, flags);
832 void rfkill_pause_polling(struct rfkill *rfkill)
834 BUG_ON(!rfkill);
836 if (!rfkill->ops->poll)
839 rfkill->polling_paused = true;
840 cancel_delayed_work_sync(&rfkill->poll_work);
844 void rfkill_resume_polling(struct rfkill *rfkill)
846 BUG_ON(!rfkill);
848 if (!rfkill->ops->poll)
851 rfkill->polling_paused = false;
853 if (rfkill->suspended)
857 &rfkill->poll_work, 0);
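rfkill_pause_polling() and rfkill_resume_polling() bracket periods when the hardware cannot answer a poll; resuming reschedules poll_work immediately unless the device is suspended. An illustrative firmware-reload bracket (names hypothetical):

        rfkill_pause_polling(radio->rfkill);
        err = acme_reload_firmware(radio);      /* hypothetical */
        rfkill_resume_polling(radio->rfkill);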
864 struct rfkill *rfkill = to_rfkill(dev);
866 rfkill->suspended = true;
867 cancel_delayed_work_sync(&rfkill->poll_work);
874 struct rfkill *rfkill = to_rfkill(dev);
877 rfkill->suspended = false;
879 if (!rfkill->registered)
882 if (!rfkill->persistent) {
883 cur = !!(rfkill->state & RFKILL_BLOCK_SW);
884 rfkill_set_block(rfkill, cur);
887 if (rfkill->ops->poll && !rfkill->polling_paused)
889 &rfkill->poll_work, 0);
901 .name = "rfkill",
908 bool rfkill_blocked(struct rfkill *rfkill)
913 spin_lock_irqsave(&rfkill->lock, flags);
914 state = rfkill->state;
915 spin_unlock_irqrestore(&rfkill->lock, flags);
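rfkill_blocked() snapshots the combined state under rfkill->lock, so a driver hot path can gate on it without extra locking. An illustrative guard in a hypothetical transmit path:

        if (rfkill_blocked(radio->rfkill))
                return -ERFKILL;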
922 struct rfkill * __must_check rfkill_alloc(const char *name,
928 struct rfkill *rfkill;
943 rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
944 if (!rfkill)
947 spin_lock_init(&rfkill->lock);
948 INIT_LIST_HEAD(&rfkill->node);
949 rfkill->type = type;
950 strcpy(rfkill->name, name);
951 rfkill->ops = ops;
952 rfkill->data = ops_data;
954 dev = &rfkill->dev;
959 return rfkill;
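rfkill_alloc() copies the name into its allocation (the kzalloc/strcpy above), so callers may pass a transient string. A minimal probe-time sketch pairing it with rfkill_register(); pdev and the acme_* names are illustrative:

        radio->rfkill = rfkill_alloc("acme-wlan", &pdev->dev,
                                     RFKILL_TYPE_WLAN,
                                     &acme_rfkill_ops, radio);
        if (!radio->rfkill)
                return -ENOMEM;

        err = rfkill_register(radio->rfkill);
        if (err) {
                rfkill_destroy(radio->rfkill);
                return err;
        }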
965 struct rfkill *rfkill;
967 rfkill = container_of(work, struct rfkill, poll_work.work);
974 rfkill->ops->poll(rfkill, rfkill->data);
977 &rfkill->poll_work,
983 struct rfkill *rfkill;
985 rfkill = container_of(work, struct rfkill, uevent_work);
988 rfkill_event(rfkill);
994 struct rfkill *rfkill;
997 rfkill = container_of(work, struct rfkill, sync_work);
1000 cur = rfkill_global_states[rfkill->type].cur;
1001 rfkill_set_block(rfkill, cur);
1005 int __must_check rfkill_register(struct rfkill *rfkill)
1011 if (!rfkill)
1014 dev = &rfkill->dev;
1018 if (rfkill->registered) {
1023 rfkill->idx = rfkill_no;
1024 dev_set_name(dev, "rfkill%lu", rfkill_no);
1027 list_add_tail(&rfkill->node, &rfkill_list);
1033 error = rfkill_led_trigger_register(rfkill);
1037 rfkill->registered = true;
1039 INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
1040 INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
1041 INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
1043 if (rfkill->ops->poll)
1045 &rfkill->poll_work,
1048 if (!rfkill->persistent || rfkill_epo_lock_active) {
1049 schedule_work(&rfkill->sync_work);
1052 bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);
1055 __rfkill_switch_all(rfkill->type, soft_blocked);
1060 rfkill_send_events(rfkill, RFKILL_OP_ADD);
1066 device_del(&rfkill->dev);
1068 list_del_init(&rfkill->node);
1075 void rfkill_unregister(struct rfkill *rfkill)
1077 BUG_ON(!rfkill);
1079 if (rfkill->ops->poll)
1080 cancel_delayed_work_sync(&rfkill->poll_work);
1082 cancel_work_sync(&rfkill->uevent_work);
1083 cancel_work_sync(&rfkill->sync_work);
1085 rfkill->registered = false;
1087 device_del(&rfkill->dev);
1090 rfkill_send_events(rfkill, RFKILL_OP_DEL);
1091 list_del_init(&rfkill->node);
1095 rfkill_led_trigger_unregister(rfkill);
1099 void rfkill_destroy(struct rfkill *rfkill)
1101 if (rfkill)
1102 put_device(&rfkill->dev);
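Teardown mirrors that pairing: rfkill_unregister() stops polling and the work items first, then rfkill_destroy() drops the device reference. Sketch:

        rfkill_unregister(radio->rfkill);
        rfkill_destroy(radio->rfkill);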
1109 struct rfkill *rfkill;
1127 list_for_each_entry(rfkill, &rfkill_list, node) {
1131 rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
1212 struct rfkill *rfkill;
1237 list_for_each_entry(rfkill, &rfkill_list, node)
1238 if (rfkill->type == ev.type ||
1240 rfkill_set_block(rfkill, ev.soft);
1244 list_for_each_entry(rfkill, &rfkill_list, node)
1245 if (rfkill->idx == ev.idx &&
1246 (rfkill->type == ev.type ||
1248 rfkill_set_block(rfkill, ev.soft);
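The fop_write fragments above accept RFKILL_OP_CHANGE for a single index and RFKILL_OP_CHANGE_ALL for a whole type. A minimal userspace sketch that soft-blocks every WLAN device through this path:

        #include <fcntl.h>
        #include <string.h>
        #include <unistd.h>
        #include <linux/rfkill.h>

        int main(void)
        {
                struct rfkill_event ev;
                int fd = open("/dev/rfkill", O_WRONLY);

                if (fd < 0)
                        return 1;

                memset(&ev, 0, sizeof(ev));
                ev.op = RFKILL_OP_CHANGE_ALL;   /* idx is ignored here */
                ev.type = RFKILL_TYPE_WLAN;
                ev.soft = 1;

                if (write(fd, &ev, sizeof(ev)) != sizeof(ev)) {
                        close(fd);
                        return 1;
                }

                close(fd);
                return 0;
        }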
1277 printk(KERN_DEBUG "rfkill: input handler enabled\n");
1301 printk(KERN_DEBUG "rfkill: input handler disabled\n");
1325 #define RFKILL_NAME "rfkill"