/* * Entries are sorted by key.
*/ if (jump_entry_key(jea) < jump_entry_key(jeb)) return -1;
if (jump_entry_key(jea) > jump_entry_key(jeb)) return 1;
/* * In the batching mode, entries should also be sorted by the code * inside the already sorted list of entries, enabling a bsearch in * the vector.
*/ if (jump_entry_code(jea) < jump_entry_code(jeb)) return -1;
if (jump_entry_code(jea) > jump_entry_code(jeb)) return 1;
/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and its problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case its ok
 * to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 * static_key_enabled() must return true, so return 1 here.
	 */
	int enabled = atomic_read(&key->enabled);

	return (enabled < 0) ? 1 : enabled;
}
EXPORT_SYMBOL_GPL(static_key_count);
/* * static_key_fast_inc_not_disabled - adds a user for a static key * @key: static key that must be already enabled * * The caller must make sure that the static key can't get disabled while * in this function. It doesn't patch jump labels, only adds a user to * an already enabled static key. * * Returns true if the increment was done. Unlike refcount_t the ref counter * is not saturated, but will fail to increment on overflow.
*/ bool static_key_fast_inc_not_disabled(struct static_key *key)
{ int v;
STATIC_KEY_CHECK_USE(key); /* * Negative key->enabled has a special meaning: it sends * static_key_slow_inc/dec() down the slow path, and it is non-zero * so it counts as "enabled" in jump_label_update(). * * The INT_MAX overflow condition is either used by the networking * code to reset or detected in the slow path of * static_key_slow_inc_cpuslocked().
*/
v = atomic_read(&key->enabled); do { if (v <= 0 || v == INT_MAX) returnfalse;
} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
/* * Careful if we get concurrent static_key_slow_inc/dec() calls; * later calls must wait for the first one to _finish_ the * jump_label_update() process. At the same time, however, * the jump_label_update() call below wants to see * static_key_enabled(&key) for jumps to be updated properly.
*/ if (static_key_fast_inc_not_disabled(key)) returntrue;
guard(mutex)(&jump_label_mutex); /* Try to mark it as 'enabling in progress. */ if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
jump_label_update(key); /* * Ensure that when static_key_fast_inc_not_disabled() or * static_key_dec_not_one() observe the positive value, * they must also observe all the text changes.
*/
atomic_set_release(&key->enabled, 1);
} else { /* * While holding the mutex this should never observe * anything else than a value >= 1 and succeed
*/ if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) returnfalse;
} returntrue;
}
staticbool static_key_dec_not_one(struct static_key *key)
{ int v;
/* * Go into the slow path if key::enabled is less than or equal than * one. One is valid to shut down the key, anything less than one * is an imbalance, which is handled at the call site. * * That includes the special case of '-1' which is set in * static_key_slow_inc_cpuslocked(), but that's harmless as it is * fully serialized in the slow path below. By the time this task * acquires the jump label lock the value is back to one and the * retry under the lock must succeed.
*/
v = atomic_read(&key->enabled); do { /* * Warn about the '-1' case though; since that means a * decrement is concurrent with a first (0->1) increment. IOW * people are trying to disable something that wasn't yet fully * enabled. This suggests an ordering problem on the user side.
*/
WARN_ON_ONCE(v < 0);
/* * Warn about underflow, and lie about success in an attempt to * not make things worse.
*/ if (WARN_ON_ONCE(v == 0)) returntrue;
if (v <= 1) returnfalse;
} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
returntrue;
}
/* Slow-path decrement; patches the jump labels when the count hits zero. */
static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	int val;

	lockdep_assert_cpus_held();

	if (static_key_dec_not_one(key))
		return;

	guard(mutex)(&jump_label_mutex);
	val = atomic_read(&key->enabled);
	/*
	 * It should be impossible to observe -1 with jump_label_mutex held,
	 * see static_key_slow_inc_cpuslocked().
	 */
	if (WARN_ON_ONCE(val == -1))
		return;
	/*
	 * Cannot already be 0, something went sideways.
	 */
	if (WARN_ON_ONCE(val == 0))
		return;

	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
}
iter = iter_start; while (iter < iter_stop) { if (init || !jump_entry_is_init(iter)) { if (addr_conflict(iter, start, end)) return 1;
}
iter++;
}
return 0;
}
#ifndef arch_jump_label_transform_static staticvoid arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type)
{ /* nothing to do on most architectures */
} #endif
/*** * A 'struct static_key' uses a union such that it either points directly * to a table of 'struct jump_entry' or to a linked list of modules which in * turn point to 'struct jump_entry' tables. * * The two lower bits of the pointer are used to keep track of which pointer * type is in use and to store the initial branch direction, we use an access * function which preserves these bits.
*/ staticvoid static_key_set_entries(struct static_key *key, struct jump_entry *entries)
{ unsignedlong type;
/* See the comment in linux/jump_label.h */ return enabled ^ branch;
}
/* Returns true when @entry points at patchable (non-init, live) text. */
static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
	/*
	 * Cannot update code that was in an init text area.
	 */
	if (!init && jump_entry_is_init(entry))
		return false;

	if (!kernel_text_address(jump_entry_code(entry))) {
		/*
		 * This skips patching built-in __exit, which
		 * is part of init_section_contains() but is
		 * not part of kernel_text_address().
		 *
		 * Skipping built-in __exit is fine since it
		 * will never be executed.
		 */
		WARN_ONCE(!jump_entry_is_init(entry),
			  "can't patch jump_label at %pS",
			  (void *)jump_entry_code(entry));
		return false;
	}

	/* Restored tail lost in the corrupted copy: entry is patchable. */
	return true;
}
if (!jump_label_can_update(entry, init)) continue;
if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) { /* * Queue is full: Apply the current queue and try again.
*/
arch_jump_label_transform_apply();
BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
}
}
arch_jump_label_transform_apply();
} #endif
/* * Since we are initializing the static_key.enabled field with * with the 'raw' int values (to avoid pulling in atomic.h) in * jump_label.h, let's make sure that is safe. There are only two * cases to check since we initialize to 0 or 1.
*/
BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);
/*** * key->type and key->next are the same via union. * This sets key->next and preserves the type bits. * * See additional comments above static_key_set_entries().
*/ staticvoid static_key_set_mod(struct static_key *key, struct static_key_mod *mod)
{ unsignedlong type;
scoped_guard(rcu) {
mod = __module_text_address((unsignedlong)start);
WARN_ON_ONCE(__module_text_address((unsignedlong)end) != mod); if (!try_module_get(mod))
mod = NULL;
} if (!mod) return 0;
/* * If the key was sealed at init, then there's no need to keep a * reference to its module entries - just patch them now and be * done with it.
*/ if (static_key_sealed(key)) goto do_poke;
jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL); if (!jlm) return -ENOMEM; if (!static_key_linked(key)) {
jlm2 = kzalloc(sizeof(struct static_key_mod),
GFP_KERNEL); if (!jlm2) {
kfree(jlm); return -ENOMEM;
}
scoped_guard(rcu)
jlm2->mod = __module_address((unsignedlong)key);
/* Only update if we've changed from our initial state */
do_poke: if (jump_label_type(iter) != jump_label_init_type(iter))
__jump_label_update(key, iter, iter_stop, true);
}
/* No memory during module load */ if (WARN_ON(!jlm)) continue;
if (prev == &key->next)
static_key_set_mod(key, jlm->next); else
*prev = jlm->next;
kfree(jlm);
jlm = static_key_mod(key); /* if only one etry is left, fold it back into the static_key */ if (jlm->next == NULL) {
static_key_set_entries(key, jlm->entries);
static_key_clear_linked(key);
kfree(jlm);
}
}
}
staticint
jump_label_module_notify(struct notifier_block *self, unsignedlong val, void *data)
{ struct module *mod = data; int ret = 0;
cpus_read_lock();
jump_label_lock();
switch (val) { case MODULE_STATE_COMING:
ret = jump_label_add_module(mod); if (ret) {
WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
jump_label_del_module(mod);
} break; case MODULE_STATE_GOING:
jump_label_del_module(mod); break;
}
/* Register the module notifier early so no module load is missed. */
static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */
/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	bool init = system_state < SYSTEM_RUNNING;
	int ret = __jump_label_text_reserved(__start___jump_table,
					     __stop___jump_table, start, end, init);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
if (static_key_linked(key)) {
__jump_label_mod_update(key); return;
}
scoped_guard(rcu) {
mod = __module_address((unsignedlong)key); if (mod) {
stop = mod->jump_entries + mod->num_jump_entries;
init = mod->state == MODULE_STATE_COMING;
}
} #endif
entry = static_key_entries(key); /* if there are no users, entry can be NULL */ if (entry)
__jump_label_update(key, entry, stop, init);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.