/* * The fix_to_virt(, idx) needs a const value (not a dynamic variable of * reg-a0) or BUILD_BUG_ON failed with "idx >= __end_of_fixed_addresses". * So use '__always_inline' and 'const unsigned int fixmap' here.
*/ static __always_inline void *patch_map(void *addr, constunsignedint fixmap)
{
uintptr_t uintaddr = (uintptr_t) addr; struct page *page;
/* * Only two pages can be mapped at a time for writing.
*/ if (len + offset_in_page(addr) > 2 * PAGE_SIZE) return -EINVAL; /* * Before reaching here, it was expected to lock the text_mutex * already, so we don't need to give another lock here and could * ensure that it was safe between each cores.
*/
lockdep_assert_held(&text_mutex);
preempt_disable();
if (across_pages)
patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
waddr = patch_map(addr, FIX_TEXT_POKE0);
memset(waddr, c, len);
/* * We could have just patched a function that is about to be * called so make sure we don't execute partially patched * instructions by flushing the icache as soon as possible.
*/
local_flush_icache_range((unsignedlong)waddr,
(unsignedlong)waddr + len);
/* * Only two pages can be mapped at a time for writing.
*/ if (len + offset_in_page(addr) > 2 * PAGE_SIZE) return -EINVAL;
/* * Before reaching here, it was expected to lock the text_mutex * already, so we don't need to give another lock here and could * ensure that it was safe between each cores. * * We're currently using stop_machine() for ftrace & kprobes, and while * that ensures text_mutex is held before installing the mappings it * does not ensure text_mutex is held by the calling thread. That's * safe but triggers a lockdep failure, so just elide it for that * specific case.
*/ if (!riscv_patch_in_stop_machine)
lockdep_assert_held(&text_mutex);
preempt_disable();
if (across_pages)
patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
waddr = patch_map(addr, FIX_TEXT_POKE0);
ret = copy_to_kernel_nofault(waddr, insn, len);
/* * We could have just patched a function that is about to be * called so make sure we don't execute partially patched * instructions by flushing the icache as soon as possible.
*/
local_flush_icache_range((unsignedlong)waddr,
(unsignedlong)waddr + len);
/* * __patch_insn_set() can only work on 2 pages at a time so call it in a * loop with len <= 2 * PAGE_SIZE.
*/ while (len) {
size = min(len, PAGE_SIZE * 2 - offset_in_page(addr));
ret = __patch_insn_set(addr, c, size); if (ret) return ret;
addr += size;
len -= size;
}
return 0;
}
NOKPROBE_SYMBOL(patch_insn_set);
/*
 * Fill len bytes of kernel text at addr with the byte c and flush the
 * icache range on success. "nosync" — the caller is responsible for any
 * cross-CPU synchronization.
 *
 * NOTE(review): the 'return ret;', closing brace and NOKPROBE_SYMBOL()
 * were dropped by the extraction; restored here (mirrors
 * patch_text_nosync() below) — confirm against the upstream file.
 */
int patch_text_set_nosync(void *addr, u8 c, size_t len)
{
	int ret;

	ret = patch_insn_set(addr, c, len);
	if (!ret)
		flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_set_nosync);
int patch_insn_write(void *addr, constvoid *insn, size_t len)
{
size_t size; int ret;
/* * Copy the instructions to the destination address, two pages at a time * because __patch_insn_write() can only handle len <= 2 * PAGE_SIZE.
*/ while (len) {
size = min(len, PAGE_SIZE * 2 - offset_in_page(addr));
ret = __patch_insn_write(addr, insn, size); if (ret) return ret;
addr += size;
insn += size;
len -= size;
}
return 0;
}
NOKPROBE_SYMBOL(patch_insn_write);
/*
 * Write len bytes of instructions at addr and flush the icache range on
 * success. "nosync" — the caller is responsible for any cross-CPU
 * synchronization (see patch_text() for the stop_machine() variant).
 *
 * (Extraction defect fixed: 'constvoid' was missing its space.)
 */
int patch_text_nosync(void *addr, const void *insns, size_t len)
{
	int ret;

	ret = patch_insn_write(addr, insns, len);
	if (!ret)
		flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);
/*
 * stop_machine() callback: exactly one CPU (the last to arrive) performs
 * the write while every other CPU spins, then flushes its local icache
 * once the writer signals completion.
 *
 * NOTE(review): the else-branch tail (local_flush_icache_all(), 'return
 * ret;', closing brace) was dropped by the extraction and is reconstructed
 * here — the release-increment comment itself names the pairing flush.
 * (Extraction defect fixed: 'staticint' was missing its space.)
 */
static int patch_text_cb(void *data)
{
	struct patch_insn *patch = data;
	int ret = 0;

	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
		ret = patch_insn_write(patch->addr, patch->insns, patch->len);
		/*
		 * Make sure the patching store is effective *before* we
		 * increment the counter which releases all waiting CPUs
		 * by using the release variant of atomic increment. The
		 * release pairs with the call to local_flush_icache_all()
		 * on the waiting CPU.
		 */
		atomic_inc_return_release(&patch->cpu_count);
	} else {
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
			cpu_relax();

		local_flush_icache_all();
	}

	return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);
/* * kprobes takes text_mutex, before calling patch_text(), but as we call * calls stop_machine(), the lockdep assertion in patch_insn_write() * gets confused by the context in which the lock is taken. * Instead, ensure the lock is held before calling stop_machine(), and * set riscv_patch_in_stop_machine to skip the check in * patch_insn_write().
*/
lockdep_assert_held(&text_mutex);
riscv_patch_in_stop_machine = true;
ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
riscv_patch_in_stop_machine = false; return ret;
}
NOKPROBE_SYMBOL(patch_text);
Messung V0.5
¤ Dauer der Verarbeitung: 0.1 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.