/*
 * NOTE(review): this span is a garbled extraction. The header comment and
 * signature of start_using_temp_mm() below are fused with body fragments
 * from at least two other routines: a text_poke VM-area allocator (the
 * get_vm_area()/map_patch_area() statements), the temp-mm setup path (the
 * mm_alloc()/get_locked_pte() statements), and a cpuhp_setup_state()
 * callback registration. The code is kept byte-identical; recover the real
 * function boundaries from the upstream source before compiling or
 * modifying it.
 */
/* * The following applies for Radix MMU. Hash MMU has different requirements, * and so is not supported. * * Changing mm requires context synchronising instructions on both sides of * the context switch, as well as a hwsync between the last instruction for * which the address of an associated storage access was translated using * the current context. * * switch_mm_irqs_off() performs an isync after the context switch. It is * the responsibility of the caller to perform the CSI and hwsync before * starting/stopping the temp mm.
*/ staticstruct mm_struct *start_using_temp_mm(struct mm_struct *temp_mm)
{ struct mm_struct *orig_mm = current->active_mm;
/* NOTE(review): from here on the statements belong to a different function
 * (per-CPU text_poke area setup). `area` and `cpu` are not declared in this
 * fragment. */
area = get_vm_area(PAGE_SIZE, 0); if (!area) {
WARN_ONCE(1, "Failed to create text area for cpu %d\n",
cpu); return -1;
}
// Map/unmap the area to ensure all page tables are pre-allocated
addr = (unsignedlong)area->addr;
err = map_patch_area(empty_zero_page, addr); if (err) return err;
/* NOTE(review): the mm_alloc()/random-address/get_locked_pte() sequence
 * below is the temp-mm variant of the patching setup, again from a
 * different function than the one declared above. */
mm = mm_alloc(); if (WARN_ON(!mm)) goto fail_no_mm;
/* * Choose a random page-aligned address from the interval * [PAGE_SIZE .. DEFAULT_MAP_WINDOW - PAGE_SIZE]. * The lower address bound is PAGE_SIZE to avoid the zero-page.
*/
addr = (1 + (get_random_long() % (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2))) << PAGE_SHIFT;
/* * PTE allocation uses GFP_KERNEL which means we need to * pre-allocate the PTE here because we cannot do the * allocation during patching when IRQs are disabled. * * Using get_locked_pte() to avoid open coding, the lock * is unnecessary.
*/
pte = get_locked_pte(mm, addr, &ptl); if (!pte) goto fail_no_pte;
pte_unmap_unlock(pte, ptl);
/* NOTE(review): the cpuhp registration below selects between a temp-mm
 * patching callback pair and the classic text_poke_area pair; it belongs
 * to an init function, not to start_using_temp_mm(). */
if (mm_patch_enabled())
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/text_poke_mm:online",
text_area_cpu_up_mm,
text_area_cpu_down_mm); else
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/text_poke:online",
text_area_cpu_up,
text_area_cpu_down);
/* cpuhp_setup_state returns >= 0 on success */ if (WARN_ON(ret < 0)) return;
/*
 * NOTE(review): garbled fragment. The map_patch_area() signature below is
 * fused with teardown statements (stop_using_temp_mm()/pte_clear()/TLB
 * flush) that belong to a temp-mm patching routine, and with the
 * early-boot fallback check that belongs to a patch_mem-style entry point.
 * `patching_mm`, `orig_mm`, `pte`, `val` and `is_dword` are not declared
 * in this fragment. Kept byte-identical.
 */
/* * This can be called for kernel text or a module.
*/ staticint map_patch_area(void *addr, unsignedlong text_poke_addr)
{ unsignedlong pfn = get_patch_pfn(addr);
/* context synchronisation performed by __patch_instruction (isync or exception) */
stop_using_temp_mm(patching_mm, orig_mm);
pte_clear(patching_mm, text_poke_addr, pte); /* * ptesync to order PTE update before TLB invalidation done * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
*/
local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);
/* * During early early boot patch_instruction is called * when text_poke_area is not ready, but we still need * to allow patching. We just do the plain old patching
*/ if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) ||
!static_branch_likely(&poking_init_done)) return __patch_mem(addr, val, addr, is_dword);
/*
 * NOTE(review): garbled fragment of __do_patch_instructions_mm(). Only the
 * local declarations and the teardown sequence (stop temp mm, clear PTE,
 * flush TLB, unlock) survive; the steps that actually map the page and copy
 * the instructions are missing from this extraction, and `err` is returned
 * without ever being assigned here. Kept byte-identical.
 */
/* * A page is mapped and instructions that fit the page are patched. * Assumes 'len' to be (PAGE_SIZE - offset_in_page(addr)) or below.
*/ staticint __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool repeat_instr)
{ struct mm_struct *patching_mm, *orig_mm; unsignedlong pfn = get_patch_pfn(addr); unsignedlong text_poke_addr;
spinlock_t *ptl;
u32 *patch_addr;
pte_t *pte; int err;
/* context synchronisation performed by __patch_instructions */
stop_using_temp_mm(patching_mm, orig_mm);
pte_clear(patching_mm, text_poke_addr, pte); /* * ptesync to order PTE update before TLB invalidation done * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
*/
local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);
pte_unmap_unlock(pte, ptl);
return err;
}
/*
 * NOTE(review): truncated fragment of __do_patch_instructions() (the
 * non-temp-mm variant). Only the header comment, signature and local
 * declarations are present; the mapping/patching/unmapping body is missing
 * from this extraction. Kept byte-identical.
 */
/* * A page is mapped and instructions that fit the page are patched. * Assumes 'len' to be (PAGE_SIZE - offset_in_page(addr)) or below.
*/ staticint __do_patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
{ unsignedlong pfn = get_patch_pfn(addr); unsignedlong text_poke_addr;
u32 *patch_addr;
pte_t *pte; int err;
/*
 * NOTE(review): garbled fragment of patch_instructions(). The loop body is
 * missing the statements that choose `plen`, disable IRQs (`flags` is
 * declared but unused here), perform the per-page patch and check `err`;
 * as extracted, `plen` is read uninitialized by `len -= plen;`. Kept
 * byte-identical — restore the missing statements from the upstream source
 * rather than patching around this fragment.
 */
/* * Patch 'addr' with 'len' bytes of instructions from 'code'. * * If repeat_instr is true, the same instruction is filled for * 'len' bytes.
*/ int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
{ while (len > 0) { unsignedlong flags;
size_t plen; int err;
/* NOTE(review): per-page patch call and `plen` computation missing here. */
len -= plen;
addr = (u32 *)((unsignedlong)addr + plen); if (!repeat_instr)
code = (u32 *)((unsignedlong)code + plen);
}
return 0;
}
NOKPROBE_SYMBOL(patch_instructions);
/*
 * patch_branch() - patch a branch instruction at @addr.
 * @addr:   location of the instruction to patch.
 * @target: branch destination address.
 * @flags:  branch-encoding flags passed through to create_branch().
 *
 * Encodes a branch from @addr to @target via create_branch() and writes it
 * with patch_instruction().
 *
 * Return: -ERANGE if create_branch() reports the target is not encodable
 * (non-zero return), otherwise the result of patch_instruction().
 */
int patch_branch(u32 *addr, unsigned long target, int flags)
{
	ppc_inst_t instr;

	/* create_branch() fails (non-zero) when target is out of reach */
	if (create_branch(&instr, addr, target, flags))
		return -ERANGE;

	return patch_instruction(addr, instr);
}
/*
 * Helper to check if a given instruction is a conditional branch.
 * Derived from the conditional checks in analyse_instr().
 *
 * Matches primary opcode 16 (bc and variants) and, for primary opcode 19,
 * the extended opcodes for bclr (16), bcctr (528) and bctar (560),
 * extracted from bits [1..10] of the instruction word.
 */
bool is_conditional_branch(ppc_inst_t instr)
{
	unsigned int opcode = ppc_inst_primary_opcode(instr);

	if (opcode == 16)	/* bc, bca, bcl, bcla */
		return true;
	if (opcode == 19) {
		switch ((ppc_inst_val(instr) >> 1) & 0x3ff) {
		case 16:	/* bclr, bclrl */
		case 528:	/* bcctr, bcctrl */
		case 560:	/* bctar, bctarl */
			return true;
		}
	}
	return false;
}
NOKPROBE_SYMBOL(is_conditional_branch);
/*
 * NOTE(review): create_cond_branch() is truncated in this extraction —
 * only the signature and the first local declaration are present; the rest
 * of the body was replaced by unrelated footer text. Kept byte-identical;
 * restore the body from the upstream source.
 */
int create_cond_branch(ppc_inst_t *instr, const u32 *addr, unsignedlong target, int flags)
{ long offset;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.