// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. * * Authors: * Paul Mackerras <paulus@au1.ibm.com> * Alexander Graf <agraf@suse.de> * Kevin Wolf <mail@kevin-wolf.de> * * Description: KVM functions specific to running on Book 3S * processors in hypervisor mode (specifically POWER7 and later). * * This file is derived from arch/powerpc/kvm/book3s.c, * by Alexander Graf <agraf@suse.de>.
*/
/* Used to indicate that a guest page fault needs to be handled */ #define RESUME_PAGE_FAULT (RESUME_GUEST | RESUME_FLAG_ARCH1) /* Used to indicate that a guest passthrough interrupt needs to be handled */ #define RESUME_PASSTHROUGH (RESUME_GUEST | RESUME_FLAG_ARCH2)
/* Used as a "null" value for timebase values */ #define TB_NIL (~(u64)0)
staticint dynamic_mt_modes = 6;
module_param(dynamic_mt_modes, int, 0644);
MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)"); staticint target_smt_mode;
module_param(target_smt_mode, int, 0644);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
staticbool one_vm_per_core;
module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires POWER8 or older)");
module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core"); #endif
/* If set, guests are allowed to create and control nested guests */ staticbool nested = true;
module_param(nested, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)");
/* * RWMR values for POWER8. These control the rate at which PURR * and SPURR count and should be set according to the number of * online threads in the vcore being run.
*/ #define RWMR_RPA_P8_1THREAD 0x164520C62609AECAUL #define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9UL #define RWMR_RPA_P8_3THREAD 0x164520C62609AECAUL #define RWMR_RPA_P8_4THREAD 0x199A421245058DA9UL #define RWMR_RPA_P8_5THREAD 0x164520C62609AECAUL #define RWMR_RPA_P8_6THREAD 0x164520C62609AECAUL #define RWMR_RPA_P8_7THREAD 0x164520C62609AECAUL #define RWMR_RPA_P8_8THREAD 0x164520C62609AECAUL
/* Used to traverse the list of runnable threads for a given vcore */ #define for_each_runnable_thread(i, vcpu, vc) \ for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
/* If we're a nested hypervisor, fall back to ordinary IPIs for now */ if (kvmhv_on_pseries()) returnfalse;
/* On POWER9 we can use msgsnd to IPI any cpu */ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
msg |= get_hard_smp_processor_id(cpu);
smp_mb();
__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); returntrue;
}
/* On POWER8 for IPIs to threads in the same core, use msgsnd */ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
preempt_disable(); if (cpu_first_thread_sibling(cpu) ==
cpu_first_thread_sibling(smp_processor_id())) {
msg |= cpu_thread_in_core(cpu);
smp_mb();
__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
preempt_enable(); returntrue;
}
preempt_enable();
}
#ifdefined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) if (cpu >= 0 && cpu < nr_cpu_ids) { if (paca_ptrs[cpu]->kvm_hstate.xics_phys) {
xics_wake_cpu(cpu); returntrue;
}
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY); returntrue;
} #endif
returnfalse;
}
/*
 * Kick a vcpu so it notices pending work: wake its rcuwait if it is
 * sleeping, else try a direct IPI to the hardware thread it last ran
 * on, else fall back to a reschedule IPI to the cpu recorded in
 * vcpu->cpu (the first thread of the core).
 */
staticvoid kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{ int cpu; struct rcuwait *waitp;
/* * rcuwait_wake_up contains smp_mb() which orders prior stores that * create pending work vs below loads of cpu fields. The other side * is the barrier in vcpu run that orders setting the cpu fields vs * testing for pending work.
 */
waitp = kvm_arch_vcpu_get_wait(vcpu); if (rcuwait_wake_up(waitp))
++vcpu->stat.generic.halt_wakeup;
/* Prefer an IPI directly to the hardware thread the vcpu is on */
cpu = READ_ONCE(vcpu->arch.thread_cpu); if (cpu >= 0 && kvmppc_ipi_thread(cpu)) return;
/* CPU points to the first thread of the core */
cpu = vcpu->cpu; if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
smp_send_reschedule(cpu);
}
/* * We use the vcpu_load/put functions to measure stolen time. * * Stolen time is counted as time when either the vcpu is able to * run as part of a virtual core, but the task running the vcore * is preempted or sleeping, or when the vcpu needs something done * in the kernel by the task running the vcpu, but that task is * preempted or sleeping. Those two things have to be counted * separately, since one of the vcpu tasks will take on the job * of running the core, and the other vcpu tasks in the vcore will * sleep waiting for it to do that, but that sleep shouldn't count * as stolen time. * * Hence we accumulate stolen time when the vcpu can run as part of * a vcore using vc->stolen_tb, and the stolen time when the vcpu * needs its task to do other things in the kernel (for example, * service a page fault) in busy_stolen. We don't accumulate * stolen time for a vcore when it is inactive, or for a vcpu * when it is in state RUNNING or NOTREADY. NOTREADY is a bit of * a misnomer; it means that the vcpu task is not executing in * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in * the kernel. We don't have any way of dividing up that time * between time that the vcpu is genuinely stopped, time that * the task is actively working on behalf of the vcpu, and time * that the task is preempted, so we don't count any of it as * stolen. * * Updates to busy_stolen are protected by arch.tbacct_lock; * updates to vc->stolen_tb are protected by the vcore->stoltb_lock * lock. The stolen times are measured in units of timebase ticks. * (Note that the != TB_NIL checks below are purely defensive; * they should never fail.) * * The POWER9 path is simpler, one vcpu per virtual core so the * former case does not exist. If a vcpu is preempted when it is * BUSY_IN_HOST and not ceded or otherwise blocked, then accumulate * the stolen cycles in busy_stolen. RUNNING is not a preemptible * state in the P9 path.
*/
/* * We can test vc->runner without taking the vcore lock, * because only this task ever sets vc->runner to this * vcpu, and once it is set to this vcpu, only this task * ever sets it to NULL.
*/ if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
kvmppc_core_end_stolen(vc, now);
if (cpu_has_feature(CPU_FTR_ARCH_300)) { /* * In the P9 path, RUNNABLE is not preemptible * (nor takes host interrupts)
*/
WARN_ON_ONCE(vcpu->arch.state == KVMPPC_VCPU_RUNNABLE); /* * Account stolen time when preempted while the vcpu task is * running in the kernel (but not in qemu, which is INACTIVE).
*/ if (task_is_running(current) &&
vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
vcpu->arch.busy_preempt = mftb(); return;
}
now = mftb();
if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
kvmppc_core_start_stolen(vc, now);
/* Dummy value used in computing PCR value below */ #define PCR_ARCH_31 (PCR_ARCH_300 << 1)
staticinlineunsignedlong map_pcr_to_cap(unsignedlong pcr)
{ unsignedlong cap = 0;
switch (pcr) { case PCR_ARCH_300:
cap = H_GUEST_CAP_POWER9; break; case PCR_ARCH_31: if (cpu_has_feature(CPU_FTR_P11_PVR))
cap = H_GUEST_CAP_POWER11; else
cap = H_GUEST_CAP_POWER10; break; default: break;
}
/* We can (emulate) our own architecture version and anything older */ if (cpu_has_feature(CPU_FTR_P11_PVR) || cpu_has_feature(CPU_FTR_ARCH_31))
host_pcr_bit = PCR_ARCH_31; elseif (cpu_has_feature(CPU_FTR_ARCH_300))
host_pcr_bit = PCR_ARCH_300; elseif (cpu_has_feature(CPU_FTR_ARCH_207S))
host_pcr_bit = PCR_ARCH_207; elseif (cpu_has_feature(CPU_FTR_ARCH_206))
host_pcr_bit = PCR_ARCH_206; else
host_pcr_bit = PCR_ARCH_205;
/* Determine lowest PCR bit needed to run guest in given PVR level */
guest_pcr_bit = host_pcr_bit; if (arch_compat) { switch (arch_compat) { case PVR_ARCH_205:
guest_pcr_bit = PCR_ARCH_205; break; case PVR_ARCH_206: case PVR_ARCH_206p:
guest_pcr_bit = PCR_ARCH_206; break; case PVR_ARCH_207:
guest_pcr_bit = PCR_ARCH_207; break; case PVR_ARCH_300:
guest_pcr_bit = PCR_ARCH_300; break; case PVR_ARCH_31: case PVR_ARCH_31_P11:
guest_pcr_bit = PCR_ARCH_31; break; default: return -EINVAL;
}
}
if (kvmhv_on_pseries() && kvmhv_is_nestedv2()) { /* * 'arch_compat == 0' would mean the guest should default to * L1's compatibility. In this case, the guest would pick * host's PCR and evaluate the corresponding capabilities.
*/
cap = map_pcr_to_cap(guest_pcr_bit); if (!(cap & nested_capabilities)) return -EINVAL;
}
spin_lock(&vc->lock);
vc->arch_compat = arch_compat;
kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LOGICAL_PVR); /* * Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit * Also set all reserved PCR bits
*/
vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK;
spin_unlock(&vc->lock);
return 0;
}
staticvoid kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{ int r;
/* Length for a per-processor buffer is passed in at offset 4 in the buffer */ struct reg_vpa {
u32 dummy; union {
__be16 hword;
__be32 word;
} length;
};
switch (subfunc) { case H_VPA_REG_VPA: /* register VPA */ /* * The size of our lppaca is 1kB because of the way we align * it for the guest to avoid crossing a 4kB boundary. We only * use 640 bytes of the structure though, so we should accept * clients that set a size of 640.
*/
BUILD_BUG_ON(sizeof(struct lppaca) != 640); if (len < sizeof(struct lppaca)) break;
vpap = &tvcpu->arch.vpa;
err = 0; break;
case H_VPA_REG_DTL: /* register DTL */ if (len < sizeof(struct dtl_entry)) break;
len -= len % sizeof(struct dtl_entry);
/* Check that they have previously registered a VPA */
err = H_RESOURCE; if (!vpa_is_registered(&tvcpu->arch.vpa)) break;
vpap = &tvcpu->arch.dtl;
err = 0; break;
case H_VPA_REG_SLB: /* register SLB shadow buffer */ /* Check that they have previously registered a VPA */
err = H_RESOURCE; if (!vpa_is_registered(&tvcpu->arch.vpa)) break;
vpap = &tvcpu->arch.slb_shadow;
err = 0; break;
case H_VPA_DEREG_VPA: /* deregister VPA */ /* Check they don't still have a DTL or SLB buf registered */
err = H_RESOURCE; if (vpa_is_registered(&tvcpu->arch.dtl) ||
vpa_is_registered(&tvcpu->arch.slb_shadow)) break;
/* * We need to pin the page pointed to by vpap->next_gpa, * but we can't call kvmppc_pin_guest_page under the lock * as it does get_user_pages() and down_read(). So we * have to drop the lock, pin the page, then get the lock * again and check that a new area didn't get registered * in the meantime.
*/ for (;;) {
gpa = vpap->next_gpa;
spin_unlock(&vcpu->arch.vpa_update_lock);
va = NULL;
nb = 0; if (gpa)
va = kvmppc_pin_guest_page(kvm, gpa, &nb);
spin_lock(&vcpu->arch.vpa_update_lock); if (gpa == vpap->next_gpa) break; /* sigh... unpin that one and try again */ if (va)
kvmppc_unpin_guest_page(kvm, va, gpa, false);
}
vpap->update_pending = 0; if (va && nb < vpap->len) { /* * If it's now too short, it must be that userspace * has changed the mappings underlying guest memory, * so unregister the region.
*/
kvmppc_unpin_guest_page(kvm, va, gpa, false);
va = NULL;
}
*old_vpap = *vpap;
vpap->gpa = gpa;
vpap->pinned_addr = va;
vpap->dirty = false; if (va)
vpap->pinned_end = va + vpap->len;
}
if (!(vcpu->arch.vpa.update_pending ||
vcpu->arch.slb_shadow.update_pending ||
vcpu->arch.dtl.update_pending)) return;
spin_lock(&vcpu->arch.vpa_update_lock); if (vcpu->arch.vpa.update_pending) {
kvmppc_update_vpa(vcpu, &vcpu->arch.vpa, &old_vpa); if (old_vpa.pinned_addr) { if (kvmhv_is_nestedv2())
kvmhv_nestedv2_set_vpa(vcpu, ~0ull);
kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa,
old_vpa.dirty);
} if (vcpu->arch.vpa.pinned_addr) {
init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); if (kvmhv_is_nestedv2())
kvmhv_nestedv2_set_vpa(vcpu, __pa(vcpu->arch.vpa.pinned_addr));
}
} if (vcpu->arch.dtl.update_pending) {
kvmppc_update_vpa(vcpu, &vcpu->arch.dtl, &old_vpa); if (old_vpa.pinned_addr)
kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa,
old_vpa.dirty);
vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
vcpu->arch.dtl_index = 0;
} if (vcpu->arch.slb_shadow.update_pending) {
kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow, &old_vpa); if (old_vpa.pinned_addr)
kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa,
old_vpa.dirty);
}
spin_unlock(&vcpu->arch.vpa_update_lock);
}
/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	unsigned long flags;
	u64 stolen;

	/* Per-vcore stolen time is only maintained on the pre-ISAv3.0 path */
	WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	stolen = vc->stolen_tb;
	/* If the vcore is currently preempted, add the time since preemption */
	if (vc->vcore_state != VCORE_INACTIVE && vc->preempt_tb != TB_NIL)
		stolen += now - vc->preempt_tb;
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);

	return stolen;
}
/* See if there is a doorbell interrupt pending for a vcpu */ staticbool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
{ int thr; struct kvmppc_vcore *vc;
/* An explicit doorbell_request always counts as pending; on ISAv3.0+ that is the only source checked here */
if (vcpu->arch.doorbell_request) returntrue; if (cpu_has_feature(CPU_FTR_ARCH_300)) returnfalse; /* * Ensure that the read of vcore->dpdes comes after the read * of vcpu->doorbell_request. This barrier matches the * smp_wmb() in kvmppc_guest_entry_inject().
 */
smp_rmb();
vc = vcpu->arch.vcore;
/* Pre-ISAv3.0: check this thread's bit in the vcore's DPDES image */
thr = vcpu->vcpu_id - vc->first_vcpuid; return !!(vc->dpdes & (1 << thr));
}
staticbool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{ if (kvmppc_get_arch_compat(vcpu) >= PVR_ARCH_207) returntrue; if ((!kvmppc_get_arch_compat(vcpu)) &&
cpu_has_feature(CPU_FTR_ARCH_207S)) returntrue; returnfalse;
}
staticint kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsignedlong mflags, unsignedlong resource, unsignedlong value1, unsignedlong value2)
{ switch (resource) { case H_SET_MODE_RESOURCE_SET_CIABR: if (!kvmppc_power8_compatible(vcpu)) return H_P2; if (value2) return H_P4; if (mflags) return H_UNSUPPORTED_FLAG_START; /* Guests can't breakpoint the hypervisor */ if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER) return H_P3;
kvmppc_set_ciabr_hv(vcpu, value1); return H_SUCCESS; case H_SET_MODE_RESOURCE_SET_DAWR0: if (!kvmppc_power8_compatible(vcpu)) return H_P2; if (!ppc_breakpoint_available()) return H_P2; if (mflags) return H_UNSUPPORTED_FLAG_START; if (value2 & DABRX_HYP) return H_P4;
kvmppc_set_dawr0_hv(vcpu, value1);
kvmppc_set_dawrx0_hv(vcpu, value2); return H_SUCCESS; case H_SET_MODE_RESOURCE_SET_DAWR1: if (!kvmppc_power8_compatible(vcpu)) return H_P2; if (!ppc_breakpoint_available()) return H_P2; if (!cpu_has_feature(CPU_FTR_DAWR1)) return H_P2; if (!vcpu->kvm->arch.dawr1_enabled) return H_FUNCTION; if (mflags) return H_UNSUPPORTED_FLAG_START; if (value2 & DABRX_HYP) return H_P4;
kvmppc_set_dawr1_hv(vcpu, value1);
kvmppc_set_dawrx1_hv(vcpu, value2); return H_SUCCESS; case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: /* * KVM does not support mflags=2 (AIL=2) and AIL=1 is reserved. * Keep this in synch with kvmppc_filter_guest_lpcr_hv.
*/ if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) &&
kvmhv_vcpu_is_radix(vcpu) && mflags == 3) return H_UNSUPPORTED_FLAG_START; return H_TOO_HARD; default: return H_TOO_HARD;
}
}
/*
 * Translate guest physical address @gpa to a host virtual address,
 * checking that [gpa, gpa + len) lies within a single memslot.
 * Returns 0 and stores the hva through @hva on success, -EFAULT if no
 * memslot maps the address (or the hva is invalid), -EINVAL if the
 * range spills past the end of the memslot.
 */
static int kvmppc_gpa_range_to_hva(struct kvm *kvm, gpa_t gpa,
				   unsigned long len, unsigned long *hva)
{
	struct kvm_memory_slot *memslot;
	unsigned long addr;

	memslot = gfn_to_memslot(kvm, gpa >> PAGE_SHIFT);
	if (!memslot)
		return -EFAULT;
	if ((gpa + len) >= ((memslot->base_gfn + memslot->npages)
			   << PAGE_SHIFT))
		return -EINVAL;
	addr = gfn_to_hva_memslot(memslot, gpa >> PAGE_SHIFT);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	/* Fold in the sub-page offset */
	*hva = addr | (gpa & (PAGE_SIZE - 1));
	return 0;
}

/* Copy guest memory in place - must reside within a single memslot */
static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from,
			     unsigned long len)
{
	unsigned long to_addr, from_addr;
	int r;

	r = kvmppc_gpa_range_to_hva(kvm, from, len, &from_addr);
	if (r)
		return r;
	r = kvmppc_gpa_range_to_hva(kvm, to, len, &to_addr);
	if (r)
		return r;

	/* Perform copy */
	r = raw_copy_in_user((void __user *)to_addr, (void __user *)from_addr,
			     len);
	if (r)
		return -EFAULT;

	mark_page_dirty(kvm, to >> PAGE_SHIFT);
	return 0;
}
/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */ if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED)) return H_PARAMETER;
/* dest (and src if copy_page flag set) must be page aligned */ if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask))) return H_PARAMETER;
/* zero and/or copy the page as determined by the flags */ if (flags & H_COPY_PAGE) {
ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz); if (ret < 0) return H_PARAMETER;
} elseif (flags & H_ZERO_PAGE) {
ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz); if (ret < 0) return H_PARAMETER;
}
/* * We expect to have been called by the real mode handler * (kvmppc_rm_h_confer()) which would have directly returned * H_SUCCESS if the source vcore wasn't idle (e.g. if it may * have useful work to do and should not confer) so we don't * recheck that here. * * In the case of the P9 single vcpu per vcore case, the real * mode handler is not called but no other threads are in the * source vcore.
*/ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
spin_lock(&vcore->lock); if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
vcore->vcore_state != VCORE_INACTIVE &&
vcore->runner)
target = vcore->runner;
spin_unlock(&vcore->lock);
}
/* Send the error out to userspace via KVM_RUN */ return rc; case H_LOGICAL_CI_LOAD:
ret = kvmppc_h_logical_ci_load(vcpu); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_LOGICAL_CI_STORE:
ret = kvmppc_h_logical_ci_store(vcpu); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_SET_MODE:
ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6),
kvmppc_get_gpr(vcpu, 7)); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_XIRR: case H_CPPR: case H_EOI: case H_IPI: case H_IPOLL: case H_XIRR_X: if (kvmppc_xics_enabled(vcpu)) { if (xics_on_xive()) {
ret = H_NOT_AVAILABLE; return RESUME_GUEST;
}
ret = kvmppc_xics_hcall(vcpu, req); break;
} return RESUME_HOST; case H_SET_DABR:
ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4)); break; case H_SET_XDABR:
ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5)); break; #ifdef CONFIG_SPAPR_TCE_IOMMU case H_GET_TCE:
ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5)); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_PUT_TCE:
ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6)); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_PUT_TCE_INDIRECT:
ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6),
kvmppc_get_gpr(vcpu, 7)); if (ret == H_TOO_HARD) return RESUME_HOST; break; case H_STUFF_TCE:
ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6),
kvmppc_get_gpr(vcpu, 7)); if (ret == H_TOO_HARD) return RESUME_HOST; break; #endif case H_RANDOM: { unsignedlong rand;
if (!arch_get_random_seed_longs(&rand, 1))
ret = H_HARDWARE;
kvmppc_set_gpr(vcpu, 4, rand); break;
} case H_RPT_INVALIDATE:
ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6),
kvmppc_get_gpr(vcpu, 7),
kvmppc_get_gpr(vcpu, 8),
kvmppc_get_gpr(vcpu, 9)); break;
case H_SET_PARTITION_TABLE:
ret = H_FUNCTION; if (nesting_enabled(kvm))
ret = kvmhv_set_partition_table(vcpu); break; case H_ENTER_NESTED:
ret = H_FUNCTION; if (!nesting_enabled(kvm)) break;
ret = kvmhv_enter_nested_guest(vcpu); if (ret == H_INTERRUPT) {
kvmppc_set_gpr(vcpu, 3, 0);
vcpu->arch.hcall_needed = 0; return -EINTR;
} elseif (ret == H_TOO_HARD) {
kvmppc_set_gpr(vcpu, 3, 0);
vcpu->arch.hcall_needed = 0; return RESUME_HOST;
} break; case H_TLB_INVALIDATE:
ret = H_FUNCTION; if (nesting_enabled(kvm))
ret = kvmhv_do_nested_tlbie(vcpu); break; case H_COPY_TOFROM_GUEST:
ret = H_FUNCTION; if (nesting_enabled(kvm))
ret = kvmhv_copy_tofrom_guest_nested(vcpu); break; case H_PAGE_INIT:
ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6)); break; case H_SVM_PAGE_IN:
ret = H_UNSUPPORTED; if (kvmppc_get_srr1(vcpu) & MSR_S)
ret = kvmppc_h_svm_page_in(kvm,
kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6)); break; case H_SVM_PAGE_OUT:
ret = H_UNSUPPORTED; if (kvmppc_get_srr1(vcpu) & MSR_S)
ret = kvmppc_h_svm_page_out(kvm,
kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6)); break; case H_SVM_INIT_START:
ret = H_UNSUPPORTED; if (kvmppc_get_srr1(vcpu) & MSR_S)
ret = kvmppc_h_svm_init_start(kvm); break; case H_SVM_INIT_DONE:
ret = H_UNSUPPORTED; if (kvmppc_get_srr1(vcpu) & MSR_S)
ret = kvmppc_h_svm_init_done(kvm); break; case H_SVM_INIT_ABORT: /* * Even if that call is made by the Ultravisor, the SSR1 value * is the guest context one, with the secure bit clear as it has * not yet been secured. So we can't check it here. * Instead the kvm->arch.secure_guest flag is checked inside * kvmppc_h_svm_init_abort().
*/
ret = kvmppc_h_svm_init_abort(kvm); break;
/* * Handle H_CEDE in the P9 path where we don't call the real-mode hcall * handlers in book3s_hv_rmhandlers.S. * * This has to be done early, not in kvmppc_pseries_do_hcall(), so * that the cede logic in kvmppc_run_single_vcpu() works properly.
 */ staticvoid kvmppc_cede(struct kvm_vcpu *vcpu)
{
/* Ceding re-enables external interrupts in the guest MSR */
__kvmppc_set_msr_hv(vcpu, __kvmppc_get_msr_hv(vcpu) | MSR_EE);
vcpu->arch.ceded = 1;
/* Order the store to ceded against the load of prodded below */
smp_mb(); if (vcpu->arch.prodded) {
/* A pending prod cancels the cede: consume it, clear ceded */
vcpu->arch.prodded = 0;
smp_mb();
vcpu->arch.ceded = 0;
}
}
/*
 * Report whether hcall @cmd is implemented by HV KVM (returns 1) or not.
 * Hcalls handled by the virtual-mode paths are listed explicitly here;
 * everything else is looked up in the real-mode handler table.
 */
staticint kvmppc_hcall_impl_hv(unsignedlong cmd)
{ switch (cmd) { case H_CEDE: case H_PROD: case H_CONFER: case H_REGISTER_VPA: case H_SET_MODE: #ifdef CONFIG_SPAPR_TCE_IOMMU case H_GET_TCE: case H_PUT_TCE: case H_PUT_TCE_INDIRECT: case H_STUFF_TCE: #endif case H_LOGICAL_CI_LOAD: case H_LOGICAL_CI_STORE: #ifdef CONFIG_KVM_XICS case H_XIRR: case H_CPPR: case H_EOI: case H_IPI: case H_IPOLL: case H_XIRR_X: #endif case H_PAGE_INIT: case H_RPT_INVALIDATE: return 1;
}
/* See if it's in the real-mode table */ return kvmppc_hcall_impl_hv_realmode(cmd);
}
if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
EMULATE_DONE) { /* * Fetch failed, so return to guest and * try executing it again.
*/ return RESUME_GUEST;
}
nthreads = vcpu->kvm->arch.emul_smt_mode;
dpdes = 0;
cpu = vcpu->vcpu_id & ~(nthreads - 1); for (thr = 0; thr < nthreads; ++thr, ++cpu) {
v = kvmppc_find_vcpu(vcpu->kvm, cpu); if (!v) continue; /* * If the vcpu is currently running on a physical cpu thread, * interrupt it in order to pull it out of the guest briefly, * which will update its vcore->dpdes value.
*/
pcpu = READ_ONCE(v->cpu); if (pcpu >= 0)
smp_call_function_single(pcpu, do_nothing, NULL, 1); if (kvmppc_doorbell_pending(v))
dpdes |= 1 << thr;
} return dpdes;
}
/* * On POWER9, emulate doorbell-related instructions in order to * give the guest the illusion of running on a multi-threaded core. * The instructions emulated are msgsndp, msgclrp, mfspr TIR, * and mfspr DPDES.
*/ staticint kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
{
u32 inst, rb, thr; unsignedlong arg; struct kvm *kvm = vcpu->kvm; struct kvm_vcpu *tvcpu;
ppc_inst_t pinst;
/* * If the lppaca had pmcregs_in_use clear when we exited the guest, then * HFSCR_PM is cleared for next entry. If the guest then tries to access * the PMU SPRs, we get this facility unavailable interrupt. Putting HFSCR_PM * back in the guest HFSCR will cause the next entry to load the PMU SPRs and * allow the guest access to continue.
*/ staticint kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu)
{ if (!(vcpu->arch.hfscr_permitted & HFSCR_PM)) return EMULATE_FAIL;
/*
 * Handle a trap taken while running an HV guest vcpu (non-nested path).
 * Returns a RESUME_* code: RESUME_GUEST to re-enter the guest,
 * RESUME_PAGE_FAULT / RESUME_PASSTHROUGH for further arch handling, or
 * RESUME_HOST to go back to the host (possibly exiting to userspace
 * via the exit_reason set in vcpu->run).
 */
staticint kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, struct task_struct *tsk)
{ struct kvm_run *run = vcpu->run; int r = RESUME_HOST;
vcpu->stat.sum_exits++;
/* * This can happen if an interrupt occurs in the last stages * of guest entry or the first stages of guest exit (i.e. after * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV * and before setting it to KVM_GUEST_MODE_HOST_HV). * That can happen due to a bug, or due to a machine check * occurring at just the wrong time.
 */ if (!kvmhv_is_nestedv2() && (__kvmppc_get_msr_hv(vcpu) & MSR_HV)) {
printk(KERN_EMERG "KVM trap in HV mode!\n");
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
vcpu->arch.shregs.msr);
kvmppc_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
run->hw.hardware_exit_reason = vcpu->arch.trap; return RESUME_HOST;
}
run->exit_reason = KVM_EXIT_UNKNOWN;
run->ready_for_interrupt_injection = 1; switch (vcpu->arch.trap) { /* We're good on these - the host merely wanted to get our attention */ case BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER:
WARN_ON_ONCE(1); /* Should never happen */
vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
fallthrough; case BOOK3S_INTERRUPT_HV_DECREMENTER:
vcpu->stat.dec_exits++;
r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_EXTERNAL: case BOOK3S_INTERRUPT_H_DOORBELL: case BOOK3S_INTERRUPT_H_VIRT:
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST; break; /* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/ case BOOK3S_INTERRUPT_HMI: case BOOK3S_INTERRUPT_PERFMON: case BOOK3S_INTERRUPT_SYSTEM_RESET:
r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_MACHINE_CHECK: { static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST); /* * Print the MCE event to host console. Ratelimit so the guest * can't flood the host log.
 */ if (__ratelimit(&rs))
machine_check_print_event_info(&vcpu->arch.mce_evt,false, true);
/* * If the guest can do FWNMI, exit to userspace so it can * deliver a FWNMI to the guest. * Otherwise we synthesize a machine check for the guest * so that it knows that the machine check occurred.
 */ if (!vcpu->kvm->arch.fwnmi_enabled) {
ulong flags = (__kvmppc_get_msr_hv(vcpu) & 0x083c0000) |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
kvmppc_core_queue_machine_check(vcpu, flags);
r = RESUME_GUEST; break;
}
/* Exit to userspace with KVM_EXIT_NMI as exit reason */
run->exit_reason = KVM_EXIT_NMI;
run->hw.hardware_exit_reason = vcpu->arch.trap; /* Clear out the old NMI status from run->flags */
run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK; /* Now set the NMI status */ if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV; else
run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;
r = RESUME_HOST; break;
} case BOOK3S_INTERRUPT_PROGRAM:
{
ulong flags; /* * Normally program interrupts are delivered directly * to the guest by the hardware, but we can get here * as a result of a hypervisor emulation interrupt * (e40) getting turned into a 700 by BML RTAS.
 */
flags = (__kvmppc_get_msr_hv(vcpu) & 0x1f0000ull) |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
kvmppc_core_queue_program(vcpu, flags);
r = RESUME_GUEST; break;
} case BOOK3S_INTERRUPT_SYSCALL:
{ int i;
if (!kvmhv_is_nestedv2() && unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) { /* * Guest userspace executed sc 1. This can only be * reached by the P9 path because the old path * handles this case in realmode hcall handlers.
 */ if (!kvmhv_vcpu_is_radix(vcpu)) { /* * A guest could be running PR KVM, so this * may be a PR KVM hcall. It must be reflected * to the guest kernel as a sc interrupt.
 */
kvmppc_core_queue_syscall(vcpu);
} else { /* * Radix guests can not run PR KVM or nested HV * hash guests which might run PR KVM, so this * is always a privilege fault. Send a program * check to guest kernel.
 */
kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
}
r = RESUME_GUEST; break;
}
/* * hcall - gather args and set exit_reason. This will next be * handled by kvmppc_pseries_do_hcall which may be able to deal * with it and resume guest, or may punt to userspace.
 */
run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); for (i = 0; i < 9; ++i)
run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
run->exit_reason = KVM_EXIT_PAPR_HCALL;
vcpu->arch.hcall_needed = 1;
r = RESUME_HOST; break;
} /* * We get these next two if the guest accesses a page which it thinks * it has mapped but which is not actually present, either because * it is for an emulated I/O device or because the corresponding * host page has been paged out. * * Any other HDSI/HISI interrupts have been handled already for P7/8 * guests. For POWER9 hash guests not using rmhandlers, basic hash * fault handling is done here.
 */ case BOOK3S_INTERRUPT_H_DATA_STORAGE: { unsignedlong vsid; long err;
if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) &&
unlikely(vcpu->arch.fault_dsisr == HDSISR_CANARY)) {
r = RESUME_GUEST; /* Just retry if it's the canary */ break;
}
if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { /* * Radix doesn't require anything, and pre-ISAv3.0 hash * already attempted to handle this in rmhandlers. The * hash fault handling below is v3 only (it uses ASDR * via fault_gpa).
 */
r = RESUME_PAGE_FAULT; break;
}
if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
kvmppc_core_queue_data_storage(vcpu,
kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
r = RESUME_GUEST; break;
}
/* NOTE(review): no assignment to 'vsid' is visible here before this call — confirm it is computed earlier in this case. */
err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
vsid, vcpu->arch.fault_dsisr, false); if (err == 0) {
r = RESUME_GUEST;
} elseif (err == -1) {
r = RESUME_PAGE_FAULT;
} else {
/* NOTE(review): queuing an *instruction* storage interrupt from the data-storage path looks suspicious — verify against the H_INST_STORAGE handler. */
kvmppc_core_queue_inst_storage(vcpu,
err | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
r = RESUME_GUEST;
} break;
}
/* * This occurs if the guest executes an illegal instruction. * If the guest debug is disabled, generate a program interrupt * to the guest. If guest debug is enabled, we need to check * whether the instruction is a software breakpoint instruction. * Accordingly return to Guest or Host.
 */ case BOOK3S_INTERRUPT_H_EMUL_ASSIST: if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
swab32(vcpu->arch.emul_inst) :
vcpu->arch.emul_inst; if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
r = kvmppc_emulate_debug_inst(vcpu);
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
r = RESUME_GUEST;
} break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM case BOOK3S_INTERRUPT_HV_SOFTPATCH: /* * This occurs for various TM-related instructions that * we need to emulate on POWER9 DD2.2. We have already * handled the cases where the guest was in real-suspend * mode and was transitioning to transactional state.
 */
r = kvmhv_p9_tm_emulation(vcpu); if (r != -1) break;
fallthrough; /* go to facility unavailable handler */ #endif
/* * This occurs if the guest (kernel or userspace), does something that * is prohibited by HFSCR. * On POWER9, this could be a doorbell instruction that we need * to emulate. * Otherwise, we just generate a program interrupt to the guest.
 */ case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
u64 cause = kvmppc_get_hfscr_hv(vcpu) >> 56;
r = EMULATE_FAIL; if (cpu_has_feature(CPU_FTR_ARCH_300)) { switch (cause) { case FSCR_MSGP_LG:
r = kvmppc_emulate_doorbell_instr(vcpu); break; case FSCR_PM_LG:
r = kvmppc_pmu_unavailable(vcpu); break; case FSCR_EBB_LG:
r = kvmppc_ebb_unavailable(vcpu); break; case FSCR_TM_LG:
r = kvmppc_tm_unavailable(vcpu); break; default: break;
}
} if (r == EMULATE_FAIL) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
r = RESUME_GUEST;
} break;
}
case BOOK3S_INTERRUPT_HV_RM_HARD:
r = RESUME_PASSTHROUGH; break; default:
kvmppc_dump_regs(vcpu);
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
__kvmppc_get_msr_hv(vcpu));
run->hw.hardware_exit_reason = vcpu->arch.trap;
r = RESUME_HOST; break;
}
return r;
}
/*
 * Handle a trap taken while running a nested (L2) guest vcpu on behalf
 * of an L1 hypervisor. Returns RESUME_GUEST to re-enter the L2 guest,
 * or RESUME_HOST to reflect the event back to the L1 guest.
 */
staticint kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
{ int r; int srcu_idx;
vcpu->stat.sum_exits++;
/* * This can happen if an interrupt occurs in the last stages * of guest entry or the first stages of guest exit (i.e. after * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV * and before setting it to KVM_GUEST_MODE_HOST_HV). * That can happen due to a bug, or due to a machine check * occurring at just the wrong time.
 */ if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
pr_emerg("KVM trap in HV mode while nested!\n");
pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
__kvmppc_get_msr_hv(vcpu));
kvmppc_dump_regs(vcpu); return RESUME_HOST;
} switch (vcpu->arch.trap) { /* We're good on these - the host merely wanted to get our attention */ case BOOK3S_INTERRUPT_HV_DECREMENTER:
vcpu->stat.dec_exits++;
r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_EXTERNAL:
vcpu->stat.ext_intr_exits++;
r = RESUME_HOST; break; case BOOK3S_INTERRUPT_H_DOORBELL: case BOOK3S_INTERRUPT_H_VIRT:
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST; break; /* These need to go to the nested HV */ case BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER:
vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
vcpu->stat.dec_exits++;
r = RESUME_HOST; break; /* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/ case BOOK3S_INTERRUPT_HMI: case BOOK3S_INTERRUPT_PERFMON: case BOOK3S_INTERRUPT_SYSTEM_RESET:
r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_MACHINE_CHECK:
{ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST); /* Pass the machine check to the L1 guest */
r = RESUME_HOST; /* Print the MCE event to host console. */ if (__ratelimit(&rs))
machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); break;
} /* * We get these next two if the guest accesses a page which it thinks * it has mapped but which is not actually present, either because * it is for an emulated I/O device or because the corresponding * host page has been paged out.
 */ case BOOK3S_INTERRUPT_H_DATA_STORAGE:
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvmhv_nested_page_fault(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); break; case BOOK3S_INTERRUPT_H_INST_STORAGE:
/* Synthesize a DSI-style fault record from the instruction fault */
vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
DSISR_SRR1_MATCH_64S; if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvmhv_nested_page_fault(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM case BOOK3S_INTERRUPT_HV_SOFTPATCH: /* * This occurs for various TM-related instructions that * we need to emulate on POWER9 DD2.2. We have already * handled the cases where the guest was in real-suspend * mode and was transitioning to transactional state.
 */
r = kvmhv_p9_tm_emulation(vcpu); if (r != -1) break;
fallthrough; /* go to facility unavailable handler */ #endif
case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
r = RESUME_HOST; break;
case BOOK3S_INTERRUPT_HV_RM_HARD:
vcpu->arch.trap = 0;
r = RESUME_GUEST; if (!xics_on_xive())
kvmppc_xics_rm_complete(vcpu, 0); break; case BOOK3S_INTERRUPT_SYSCALL:
{ unsignedlong req = kvmppc_get_gpr(vcpu, 3);
/* * The H_RPT_INVALIDATE hcalls issued by nested * guests for process-scoped invalidations when * GTSE=0, are handled here in L0.
 */ if (req == H_RPT_INVALIDATE) {
r = kvmppc_nested_h_rpt_invalidate(vcpu); break;
}
r = RESUME_HOST; break;
} default:
r = RESUME_HOST; break;
}
return r;
}
/*
 * Copy the guest's special registers out to userspace (KVM_GET_SREGS).
 *
 * Fills @sregs with the vcpu's PVR and the currently cached SLB entries
 * (slbe/slbv pairs); every other field is zeroed first.
 *
 * Return: always 0.
 */
static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int n;

	memset(sregs, 0, sizeof(*sregs));
	sregs->pvr = vcpu->arch.pvr;

	/* Export each cached SLB entry to the userspace layout. */
	for (n = 0; n < vcpu->arch.slb_max; n++) {
		sregs->u.s.ppc64.slb[n].slbe = vcpu->arch.slb[n].orige;
		sregs->u.s.ppc64.slb[n].slbv = vcpu->arch.slb[n].origv;
	}

	return 0;
}
/*
 * Load guest special registers from userspace (KVM_SET_SREGS).
 *
 * Rejects any PVR other than the vcpu's own, then compacts the SLB
 * entries from @sregs that have SLB_ESID_V set into the front of the
 * vcpu's SLB cache and records the count in slb_max.
 *
 * Return: 0 on success, -EINVAL if the PVR does not match.
 */
static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int src, dst = 0;

	/* Only accept the same PVR as the host's, since we can't spoof it */
	if (sregs->pvr != vcpu->arch.pvr)
		return -EINVAL;

	/* Keep only valid entries, packed contiguously at the front. */
	for (src = 0; src < vcpu->arch.slb_nr; src++) {
		if (!(sregs->u.s.ppc64.slb[src].slbe & SLB_ESID_V))
			continue;
		vcpu->arch.slb[dst].orige = sregs->u.s.ppc64.slb[src].slbe;
		vcpu->arch.slb[dst].origv = sregs->u.s.ppc64.slb[src].slbv;
		dst++;
	}
	vcpu->arch.slb_max = dst;

	return 0;
}
/* * Enforce limits on guest LPCR values based on hardware availability, * guest configuration, and possibly hypervisor support and security * concerns.
*/ unsignedlong kvmppc_filter_lpcr_hv(struct kvm *kvm, unsignedlong lpcr)
{ /* LPCR_TC only applies to HPT guests */ if (kvm_is_radix(kvm))
lpcr &= ~LPCR_TC;
/* On POWER8 and above, userspace can modify AIL */ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
lpcr &= ~LPCR_AIL; if ((lpcr & LPCR_AIL) != LPCR_AIL_3)
lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */ /* * On some POWER9s we force AIL off for radix guests to prevent * executing in MSR[HV]=1 mode with the MMU enabled and PIDR set to * guest, which can result in Q0 translations with LPID=0 PID=PIDR to * be cached, which the host TLB management does not expect.
*/ if (kvm_is_radix(kvm) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
lpcr &= ~LPCR_AIL;
/* * On POWER9, allow userspace to enable large decrementer for the * guest, whether or not the host has it enabled.
*/ if (!cpu_has_feature(CPU_FTR_ARCH_300))
lpcr &= ~LPCR_LD;
/* * If ILE (interrupt little-endian) has changed, update the * MSR_LE bit in the intr_msr for each vcpu in this vcore.
*/ if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { struct kvm_vcpu *vcpu; unsignedlong i;
/*
 * NOTE(review): the German disclaimer text below appears to be a web-page
 * extraction artifact accidentally pasted into this file; it is not part
 * of the kernel source. Preserved here in English translation pending
 * removal by the file owner:
 *
 *   "The information on this website has been carefully compiled to the
 *    best of our knowledge. However, neither completeness, nor
 *    correctness, nor quality of the information provided is guaranteed.
 *    Note: the colored syntax highlighting and the measurement are still
 *    experimental."
 */