/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */

/* list of registered rf switches; protected by rfkill_global_mutex */
static LIST_HEAD(rfkill_list);

/* serializes list manipulation and (for now) the other globals above */
static DEFINE_MUTEX(rfkill_global_mutex);

/* list of open fds of /dev/rfkill */
static LIST_HEAD(rfkill_fds);
/*
 * Default initial software state for all radio types: 1 = radios start
 * unblocked.  Exposed read-only (0444) as the "default_state" module
 * parameter.
 */
static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");
/*
 * NOTE(review): truncated fragment -- the enclosing function's header is
 * not visible in this chunk.  The visible code registers the two global
 * LED triggers ("rfkill-any" and "rfkill-none"), rolling back the first
 * registration if the second fails.
 */
rfkill_any_led_trigger.name = "rfkill-any";
/* Propagate failure to the caller if the first trigger cannot register. */
ret = led_trigger_register(&rfkill_any_led_trigger); if (ret) return ret;
rfkill_none_led_trigger.name = "rfkill-none";
ret = led_trigger_register(&rfkill_none_led_trigger); if (ret)
/* Undo the "any" registration so we don't leak it on failure. */
led_trigger_unregister(&rfkill_any_led_trigger); else /* Delay activation until all global triggers are registered */
rfkill_global_led_trigger_event();
/*
 * rfkill_event - notify userspace about a state change
 * @rfkill: the rfkill struct whose state changed
 *
 * Emits a KOBJ_CHANGE uevent for the device and queues an
 * RFKILL_OP_CHANGE event to all open /dev/rfkill file descriptors.
 * Does nothing until the device has been registered.
 */
static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}
/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 *
 * NOTE(review): this chunk appears truncated -- 'err' is tested below
 * with no visible assignment (the ops->set_block() call that normally
 * produces it is not present here), and the function body does not close
 * within this view.  Confirm against the complete source.
 */
staticvoid rfkill_set_block(struct rfkill *rfkill, bool blocked)
{ unsignedlong flags; bool prev, curr; int err;
/* Don't touch the hardware while the device is (about to be) asleep. */
if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) return;
/*
 * Some platforms (...!) generate input events which affect the
 * _hard_ kill state -- whenever something tries to change the
 * current software state query the hardware state too.
 */
if (rfkill->ops->query)
rfkill->ops->query(rfkill, rfkill->data);
spin_lock_irqsave(&rfkill->lock, flags); if (err) {
/*
 * Failed -- reset status to _PREV, which may be different
 * from what we have set _PREV to earlier in this function
 * if rfkill_set_sw_state was invoked.
 */
if (rfkill->state & RFKILL_BLOCK_SW_PREV)
rfkill->state |= RFKILL_BLOCK_SW; else
rfkill->state &= ~RFKILL_BLOCK_SW;
}
/* Clear the transient SETCALL/PREV bookkeeping bits. */
rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
curr = rfkill->state & RFKILL_BLOCK_SW;
spin_unlock_irqrestore(&rfkill->lock, flags);
/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_update_global_state(type, blocked);

	list_for_each_entry(rfkill, &rfkill_list, node) {
		/* Skip switches of other types, unless affecting all types. */
		if (rfkill->type != type && type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}
/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	/* rfkill-input is disabled; ignore input-driven switching. */
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}
/** * rfkill_epo - emergency power off all transmitters * * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex. * * The global state before the EPO is saved and can be restored later * using rfkill_restore_states().
*/ void rfkill_epo(void)
{ struct rfkill *rfkill; int i;
for (i = 0; i < NUM_RFKILL_TYPES; i++) {
rfkill_global_states[i].sav = rfkill_global_states[i].cur;
rfkill_global_states[i].cur = true;
}
mutex_unlock(&rfkill_global_mutex);
}
/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global state from the
 * states in rfkill_default_states. This can undo the effects of
 * a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int idx;

	/* Nothing to do when rfkill-input is disabled. */
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	/* Release the EPO lock, then push each saved state back out. */
	rfkill_epo_lock_active = false;
	for (idx = 0; idx < NUM_RFKILL_TYPES; idx++)
		__rfkill_switch_all(idx, rfkill_global_states[idx].sav);

	mutex_unlock(&rfkill_global_mutex);
}
/** * rfkill_remove_epo_lock - unlock state changes * * Used by rfkill-input manually unlock state changes, when * the EPO switch is deactivated.
*/ void rfkill_remove_epo_lock(void)
{ if (atomic_read(&rfkill_input_disabled)) return;
/**
 * rfkill_is_epo_lock_active - returns true when EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition, and
 * 1 (true) if there is an active EPO condition, which locks all
 * radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}
/** * rfkill_get_global_sw_state - returns global state for a type * @type: the type to get the global state of * * Returns the current global state for a given wireless * device type.
*/ bool rfkill_get_global_sw_state(constenum rfkill_type type)
{ return rfkill_global_states[type].cur;
} #endif
/*
 * NOTE(review): truncated fragment -- the enclosing function header
 * (presumably the combined hw/sw state setter) and a following poll
 * helper's header are not visible in this chunk.
 */
/*
 * No need to care about prev/setblock ... this is for uevent only
 * and that will get triggered by rfkill_set_block anyway.
 */
swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
__rfkill_set_sw_state(rfkill, sw); if (hw)
rfkill->state |= RFKILL_BLOCK_HW; else
rfkill->state &= ~RFKILL_BLOCK_HW;
spin_unlock_irqrestore(&rfkill->lock, flags);
if (!rfkill->registered) {
/* Not registered yet: remember the state as persistent instead. */
rfkill->persistent = true;
} else { if (swprev != sw || hwprev != hw)
/* Only notify userspace if something actually changed. */
schedule_work(&rfkill->uevent_work);
/*
 * Poll hardware state -- driver will use one of the
 * rfkill_set{,_hw,_sw}_state functions and use its
 * return value to update the current status.
 */
rfkill->ops->poll(rfkill, rfkill->data);
/*
 * NOTE(review): truncated fragment -- the enclosing /dev/rfkill read
 * handler's header is not visible in this chunk.  The visible code
 * waits for an event, then copies it to userspace.
 */
while (list_empty(&data->events)) { if (file->f_flags & O_NONBLOCK) {
/* Non-blocking read with no pending events: report EAGAIN. */
ret = -EAGAIN; goto out;
}
mutex_unlock(&data->mtx); /* since we re-check and it just compares pointers,
 * using !list_empty() without locking isn't a problem
 */
ret = wait_event_interruptible(data->read_wait,
!list_empty(&data->events));
mutex_lock(&data->mtx);
/* Interrupted by a signal -- propagate -ERESTARTSYS. */
if (ret) goto out;
}
ev = list_first_entry(&data->events, struct rfkill_int_event,
list);
/* Copy no more than the event, the user buffer, or the fd's API size. */
sz = min_t(unsignedlong, sizeof(ev->ev), count);
sz = min_t(unsignedlong, sz, data->max_size);
ret = sz; if (copy_to_user(buf, &ev->ev, sz))
ret = -EFAULT;
/*
 * NOTE(review): truncated fragment -- the enclosing /dev/rfkill write
 * handler's header is not visible in this chunk.  The visible code
 * validates and applies a userspace rfkill event.
 */
/* we don't need the 'hard' variable but accept it */ if (count < RFKILL_EVENT_SIZE_V1 - 1) return -EINVAL;
/*
 * Copy as much data as we can accept into our 'ev' buffer,
 * but tell userspace how much we've copied so it can determine
 * our API version even in a write() call, if it cares.
 */
count = min(count, sizeof(ev));
count = min_t(size_t, count, data->max_size); if (copy_from_user(&ev, buf, count)) return -EFAULT;
/* Reject types this kernel does not know about. */
if (ev.type >= NUM_RFKILL_TYPES) return -EINVAL;
mutex_lock(&rfkill_global_mutex);
switch (ev.op) { case RFKILL_OP_CHANGE_ALL:
/* Update the global default, then every matching switch. */
rfkill_update_global_state(ev.type, ev.soft);
list_for_each_entry(rfkill, &rfkill_list, node) if (rfkill->type == ev.type ||
ev.type == RFKILL_TYPE_ALL)
rfkill_set_block(rfkill, ev.soft);
ret = 0; break; case RFKILL_OP_CHANGE:
/* Change one specific switch, matched by index and type. */
list_for_each_entry(rfkill, &rfkill_list, node) if (rfkill->idx == ev.idx &&
(rfkill->type == ev.type ||
ev.type == RFKILL_TYPE_ALL))
rfkill_set_block(rfkill, ev.soft);
ret = 0; break; default:
ret = -EINVAL; break;
}
/*
 * NOTE(review): extraction artifact -- the following German website
 * disclaimer was embedded as bare text (it is not valid C and not part
 * of the rfkill code); preserved here, translated, as a comment:
 * "The information on this web page was compiled carefully to the best
 * of our knowledge.  However, neither completeness, correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */