/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

/*
 * The version of this function in tlbex.c uses current_cpu_type(), but for KVM
 * we assume symmetry.
 */
static int c0_kscratch(void)
{
	return 31;
}
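
/*
 * Illustrative note (not from the original source): scratch registers are
 * named throughout this file by a { CP0 register, select } pair, so e.g.
 * { c0_kscratch(), 2 } denotes CP0 $31 sel 2, i.e. KScratch1 on cores that
 * implement it.
 */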

/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
int kvm_mips_entry_setup(void)
{
	/*
	 * We prefer to use KScratchN registers if they are available over the
	 * defaults above, which may not work on all cores.
	 */
	unsigned int kscratch_mask = cpu_data[0].kscratch_mask;

if (pgd_reg != -1)
kscratch_mask &= ~BIT(pgd_reg);

	/* Pick a scratch register for storing VCPU */
	if (kscratch_mask) {
scratch_vcpu[0] = c0_kscratch();
scratch_vcpu[1] = ffs(kscratch_mask) - 1;
kscratch_mask &= ~BIT(scratch_vcpu[1]);
}

	/* Pick a scratch register to use as a temp for saving state */
	if (kscratch_mask) {
scratch_tmp[0] = c0_kscratch();
scratch_tmp[1] = ffs(kscratch_mask) - 1;
kscratch_mask &= ~BIT(scratch_tmp[1]);
}
return 0;
}
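
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * kvm_mips_entry_setup() must run once before any of the kvm_mips_build_*()
 * functions below are used to assemble code, e.g.:
 *
 *	ret = kvm_mips_entry_setup();
 *	if (ret)
 *		return ret;
 *	p = kvm_mips_build_vcpu_run(gebase);
 */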

static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
					unsigned int frame)
{
	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

	/* Save the temp scratch register value in cp0_cause of stack frame */
	if (scratch_tmp[0] == c0_kscratch()) {
UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
}
}

/**
 * build_set_exc_base() - Assemble code to write exception base address.
 * @p:		Code buffer pointer.
 * @reg:	Source register (generated code may set WG bit in @reg).
 *
 * Assemble code to modify the exception base address in the EBase register,
 * using the appropriately sized access and setting the WG bit if necessary.
 */
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
	if (cpu_has_ebase_wg) {
		/* Set WG so that all the bits get written */
uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
UASM_i_MTC0(p, reg, C0_EBASE);
} else {
uasm_i_mtc0(p, reg, C0_EBASE);
}
}
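
/*
 * For illustration, with cpu_has_ebase_wg the sequence assembled above is
 * equivalent to:
 *
 *	ori	reg, reg, MIPS_EBASE_WG	# set WG so all bits get written
 *	dmtc0	reg, $15, 1		# UASM_i_MTC0; mtc0 on 32-bit kernels
 *
 * and a plain "mtc0 reg, $15, 1" otherwise (EBase is CP0 register 15,
 * select 1).
 */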

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 *	int vcpu_run(struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * GPR_A0: vcpu
	 */

	/* k0/k1 not being used in host kernel context */
	UASM_i_ADDIU(&p, GPR_K1, GPR_SP, -(int)sizeof(struct pt_regs));
	for (i = 16; i < 32; ++i) {
		if (i == 24)
i = 28;
UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), GPR_K1);
}
/* Save host status */
uasm_i_mfc0(&p, GPR_V0, C0_STATUS);
UASM_i_SW(&p, GPR_V0, offsetof(struct pt_regs, cp0_status), GPR_K1);
/* Save scratch registers, will be used to store pointer to vcpu etc */
kvm_mips_build_save_scratch(&p, GPR_V1, GPR_K1);
/* VCPU scratch register has pointer to vcpu */
	UASM_i_MTC0(&p, GPR_A0, scratch_vcpu[0], scratch_vcpu[1]);

	/* Offset into vcpu->arch */
	UASM_i_ADDIU(&p, GPR_K1, GPR_A0, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
UASM_i_SW(&p, GPR_SP, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
/* Save the kernel gp as well */
UASM_i_SW(&p, GPR_GP, offsetof(struct kvm_vcpu_arch, host_gp), GPR_K1);
	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
UASM_i_LA(&p, GPR_K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
uasm_i_ehb(&p);
/* load up the new EBASE */
UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, guest_ebase), GPR_K1);
build_set_exc_base(&p, GPR_K0);
	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
uasm_i_addiu(&p, GPR_K0, GPR_ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
uasm_i_andi(&p, GPR_V0, GPR_V0, ST0_IM);
uasm_i_or(&p, GPR_K0, GPR_K0, GPR_V0);
uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
uasm_i_ehb(&p);
p = kvm_mips_build_enter_guest(p);
return p;
}
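
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the code
 * assembled above is executed as an ordinary C function through a function
 * pointer, along the lines of:
 *
 *	int (*vcpu_run)(struct kvm_vcpu *vcpu) = (void *)entry_addr;
 *	int ret = vcpu_run(vcpu);
 *
 * where entry_addr is wherever kvm_mips_build_vcpu_run() wrote the code
 * (after appropriate icache flushing).
 */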

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label __maybe_unused *l = labels;
	struct uasm_reloc __maybe_unused *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
	if (cpu_has_ldpte)
		UASM_i_MFC0(&p, GPR_K0, C0_PWBASE);
	else
		UASM_i_MFC0(&p, GPR_K0, c0_kscratch(), pgd_reg);
UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_pgd), GPR_K1);

	/*
	 * Set up KVM GPA pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 *
	 * We keep GPR_S0 pointing at struct kvm so we can load the ASID below.
	 */
UASM_i_LW(&p, GPR_S0, (int)offsetof(struct kvm_vcpu, kvm) -
(int)offsetof(struct kvm_vcpu, arch), GPR_K1);
UASM_i_LW(&p, GPR_A0, offsetof(struct kvm, arch.gpa_mm.pgd), GPR_S0);
	UASM_i_LA(&p, GPR_T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, GPR_RA, GPR_T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, GPR_A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

/* Set GM bit to setup eret to VZ guest context */
uasm_i_addiu(&p, GPR_V1, GPR_ZERO, 1);
uasm_i_mfc0(&p, GPR_K0, C0_GUESTCTL0);
uasm_i_ins(&p, GPR_K0, GPR_V1, MIPS_GCTL0_GM_SHIFT, 1);
uasm_i_mtc0(&p, GPR_K0, C0_GUESTCTL0);
	if (cpu_has_guestid) {
		/*
		 * Set root mode GuestID, so that root TLB refill handler can
		 * use the correct GuestID in the root TLB.
		 */

		/* Get current GuestID */
		uasm_i_mfc0(&p, GPR_T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = GuestCtl1.ID */
uasm_i_ext(&p, GPR_T1, GPR_T0, MIPS_GCTL1_ID_SHIFT,
MIPS_GCTL1_ID_WIDTH);
uasm_i_ins(&p, GPR_T0, GPR_T1, MIPS_GCTL1_RID_SHIFT,
MIPS_GCTL1_RID_WIDTH);
uasm_i_mtc0(&p, GPR_T0, C0_GUESTCTL1);

		/* GuestID handles dealiasing so we don't need to touch ASID */
		goto skip_asid_restore;
}
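
	/*
	 * For illustration, in C terms the ext/ins pair above performs
	 * roughly:
	 *
	 *	id = (gctl1 >> MIPS_GCTL1_ID_SHIFT) &
	 *	     ((1 << MIPS_GCTL1_ID_WIDTH) - 1);
	 *	gctl1 = (gctl1 & ~rid_mask) | (id << MIPS_GCTL1_RID_SHIFT);
	 *
	 * i.e. GuestCtl1.RID is loaded from the current GuestCtl1.ID.
	 */
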
/* Set the root ASID for the Guest */
UASM_i_ADDIU(&p, GPR_T1, GPR_S0,
offsetof(struct kvm, arch.gpa_mm.context.asid));

	/* t1: contains the base of the ASID array, need to get the cpu id */
	/* smp_processor_id */
	uasm_i_lw(&p, GPR_T2, offsetof(struct thread_info, cpu), GPR_GP);
	/* index the ASID array */
uasm_i_sll(&p, GPR_T2, GPR_T2, ilog2(sizeof(long)));
UASM_i_ADDU(&p, GPR_T3, GPR_T1, GPR_T2);
	UASM_i_LW(&p, GPR_K0, 0, GPR_T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/*
	 * reuse ASID array offset
	 * cpuinfo_mips is a multiple of sizeof(long)
	 */
	uasm_i_addiu(&p, GPR_T3, GPR_ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
	uasm_i_mul(&p, GPR_T2, GPR_T2, GPR_T3);

	/* Mask the loaded ASID with this CPU's variable-width asid_mask */
	UASM_i_LA_mostly(&p, GPR_AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, GPR_AT, GPR_AT, GPR_T2);
	UASM_i_LW(&p, GPR_T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), GPR_AT);
	uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T2);
#else
	uasm_i_andi(&p, GPR_K0, GPR_K0, MIPS_ENTRYHI_ASID);
#endif

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == GPR_K0 || i == GPR_K1)
			continue;
UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), GPR_K1);
}
/* Save guest k1 into scratch register */
UASM_i_MTC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);
/* Get the VCPU pointer from the VCPU scratch register */
UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);
/* Save guest k0 into VCPU structure */
UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu, arch.gprs[GPR_K0]), GPR_K1);

	/*
	 * Some of the common tlbex code uses current_cpu_type(). For KVM we
	 * assume symmetry and just disable preemption to silence the warning.
	 */
preempt_disable();
#ifdef CONFIG_CPU_LOONGSON64
UASM_i_MFC0(&p, GPR_K1, C0_PGD);
	uasm_i_lddir(&p, GPR_K0, GPR_K1, 3);  /* global page dir */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_lddir(&p, GPR_K1, GPR_K0, 1);  /* middle page dir */
#endif
uasm_i_ldpte(&p, GPR_K1, 0); /* even */
uasm_i_ldpte(&p, GPR_K1, 1); /* odd */
	uasm_i_tlbwr(&p);
#else
	/*
	 * Now for the actual refill bit. A lot of this can be common with the
	 * Linux TLB refill handler, however we don't need to handle so many
	 * cases. We only need to handle user mode refills, and user mode runs
	 * with 32-bit addressing.
	 *
	 * Therefore the branch to label_vmalloc generated by build_get_pmde64()
	 * that isn't resolved should never actually get taken and is harmless
	 * to leave in place for now.
	 */
#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, GPR_K0, GPR_K1); /* get pmd in GPR_K1 */
#else
	build_get_pgde32(&p, GPR_K0, GPR_K1); /* get pgd in GPR_K1 */
#endif

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * branch to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == GPR_K0 || i == GPR_K1)
			continue;
UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), GPR_K1);
}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, GPR_T0);
	UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, hi), GPR_K1);

	uasm_i_mflo(&p, GPR_T0);
	UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, lo), GPR_K1);
#endif

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
UASM_i_MFC0(&p, GPR_K0, C0_EPC);
UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, pc), GPR_K1);

	/*
	 * Set up normal Linux process pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 */
UASM_i_LW(&p, GPR_A0,
offsetof(struct kvm_vcpu_arch, host_pgd), GPR_K1);
	UASM_i_LA(&p, GPR_T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, GPR_RA, GPR_T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, GPR_A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

/* Clear GM bit so we don't enter guest mode when EXL is cleared */
uasm_i_mfc0(&p, GPR_K0, C0_GUESTCTL0);
uasm_i_ins(&p, GPR_K0, GPR_ZERO, MIPS_GCTL0_GM_SHIFT, 1);
uasm_i_mtc0(&p, GPR_K0, C0_GUESTCTL0);
/* Save GuestCtl0 so we can access GExcCode after CPU migration */
uasm_i_sw(&p, GPR_K0,
offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), GPR_K1);

	if (cpu_has_guestid) {
		/*
		 * Clear root mode GuestID, so that root TLB operations use the
		 * root GuestID in the root TLB.
		 */
		uasm_i_mfc0(&p, GPR_T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
uasm_i_ins(&p, GPR_T0, GPR_ZERO, MIPS_GCTL1_RID_SHIFT,
MIPS_GCTL1_RID_WIDTH);
uasm_i_mtc0(&p, GPR_T0, C0_GUESTCTL1);
}
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
uasm_i_addiu(&p, GPR_AT, GPR_ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
uasm_i_and(&p, GPR_V0, GPR_V0, GPR_AT);
uasm_i_lui(&p, GPR_AT, ST0_CU0 >> 16);
	uasm_i_or(&p, GPR_V0, GPR_V0, GPR_AT);
#ifdef CONFIG_64BIT
	uasm_i_ori(&p, GPR_V0, GPR_V0, ST0_SX | ST0_UX);
#endif
uasm_i_mtc0(&p, GPR_V0, C0_STATUS);
uasm_i_ehb(&p);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
uasm_i_move(&p, GPR_A0, GPR_S0);
	UASM_i_LA(&p, GPR_T9, (unsigned long)kvm_mips_handle_exit);
uasm_i_jalr(&p, GPR_RA, GPR_T9);
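	/*
	 * Note: the ADDIU below executes in the jalr delay slot, so the C
	 * call frame is carved out of the stack before kvm_mips_handle_exit()
	 * starts running.
	 */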
UASM_i_ADDIU(&p, GPR_SP, GPR_SP, -CALLFRAME_SIZ);
uasm_resolve_relocs(relocs, labels);
p = kvm_mips_build_ret_from_exit(p);
return p;
}

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc) or resuming the guest
	 */
uasm_i_andi(&p, GPR_T0, GPR_V0, RESUME_HOST);
uasm_il_bnez(&p, &r, GPR_T0, label_return_to_host);
uasm_i_nop(&p);
p = kvm_mips_build_ret_to_guest(p);
uasm_l_return_to_host(&l, p);
p = kvm_mips_build_ret_to_host(p);
uasm_resolve_relocs(relocs, labels);
return p;
}
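
/*
 * For illustration: the convention assumed above is that
 * kvm_mips_handle_exit() returns a RESUME_* value in v0. The RESUME_HOST bit
 * selects the return-to-host path, and on that path the host error code is
 * recovered as v0 >> 2 (arithmetic) by kvm_mips_build_ret_to_host() below.
 */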

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
u32 *p = addr;
/* Put the saved pointer to vcpu (s0) back into the scratch register */
UASM_i_MTC0(&p, GPR_S0, scratch_vcpu[0], scratch_vcpu[1]);
/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, GPR_T0, offsetof(struct kvm_vcpu, arch.guest_ebase), GPR_S0);
/* Switch EBASE back to the one used by KVM */
uasm_i_mfc0(&p, GPR_V1, C0_STATUS);
uasm_i_lui(&p, GPR_AT, ST0_BEV >> 16);
uasm_i_or(&p, GPR_K0, GPR_V1, GPR_AT);
uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
uasm_i_ehb(&p);
build_set_exc_base(&p, GPR_T0);
/* Setup status register for running guest in UM */
uasm_i_ori(&p, GPR_V1, GPR_V1, ST0_EXL | KSU_USER | ST0_IE);
UASM_i_LA(&p, GPR_AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
uasm_i_and(&p, GPR_V1, GPR_V1, GPR_AT);
uasm_i_mtc0(&p, GPR_V1, C0_STATUS);
uasm_i_ehb(&p);
p = kvm_mips_build_enter_guest(p);
return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

/* EBASE is already pointing to Linux */
UASM_i_LW(&p, GPR_K1, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
UASM_i_ADDIU(&p, GPR_K1, GPR_K1, -(int)sizeof(struct pt_regs));

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
uasm_i_sra(&p, GPR_K0, GPR_V0, 2);
uasm_i_move(&p, GPR_V0, GPR_K0);

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
i = 28;
UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), GPR_K1);
}
/* Restore GPR_RA, which is the address we will return to */
UASM_i_LW(&p, GPR_RA, offsetof(struct pt_regs, regs[GPR_RA]), GPR_K1);
uasm_i_jr(&p, GPR_RA);
uasm_i_nop(&p);

	return p;
}