/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");
/*
 * allow asynchronous deinit for protected guests; enable by default since
 * the feature is opt-in anyway
 */
static int async_destroy = 1;
module_param(async_destroy, int, 0444);
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");
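/*
 * Illustrative note (not from this file): the writable parameters above use
 * mode 0644, so they can be inspected and tuned at runtime through sysfs,
 * roughly like
 *
 *	cat /sys/module/kvm/parameters/halt_poll_max_steal
 *	echo 20 > /sys/module/kvm/parameters/halt_poll_max_steal
 *
 * async_destroy is registered with mode 0444 and is therefore read-only
 * after module load.
 */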
/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u64 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
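/*
 * Worked example (illustrative, not from this file): epoch and epdx are the
 * low and high 64 bits of a 128-bit signed epoch. If the TOD jumps forward
 * by 1, we add delta = -1 = 0xffffffffffffffffULL to epoch, and delta_idx
 * becomes -1 (sign extension). For epoch = 2 the unsigned add wraps to 1;
 * since 1 < delta, the carry bumps epdx by 1, so epdx changes by -1 + 1 = 0
 * overall: exactly a 128-bit subtraction of 1. For epoch = 0 the add yields
 * 0xfffffffffffffffe with no wrap, and epdx takes the full -1, borrowing
 * from the high word as a 128-bit subtraction requires.
 */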
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}
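/*
 * Sketch (assumption, the registration happens elsewhere in this file): the
 * callback above is wired into the s390 epoch delta notifier chain roughly
 * like
 *
 *	static struct notifier_block kvm_clock_sync_nb = {
 *		.notifier_call = kvm_clock_sync,
 *	};
 *	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
 *				       &kvm_clock_sync_nb);
 *
 * so that a clock synchronization during stop_machine() adjusts every
 * guest's epoch by the observed delta.
 */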
if (test_facility(146)) /* MSA8 */
__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
kvm_s390_available_subfunc.kma);
if (test_facility(155)) /* MSA9 */
__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
kvm_s390_available_subfunc.kdsa);
if (test_facility(150)) /* SORTL */
__sortl_query(&kvm_s390_available_subfunc.sortl);
if (test_facility(151)) /* DFLTCC */
__dfltcc_query(&kvm_s390_available_subfunc.dfltcc);
if (test_facility(201)) /* PFCR */
pfcr_query(&kvm_s390_available_subfunc.pfcr);
	if (machine_has_esop())
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !machine_has_esop() || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}
static int __init __kvm_s390_init(void)
{
	int rc = -ENOMEM;

static void __kvm_s390_exit(void)
{
	kvm_s390_gib_destroy();
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
	case KVM_CAP_IRQFD_RESAMPLE:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !(kvm && kvm_is_ucontrol(kvm)))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_S390_MEM_OP_EXTENSION:
		/*
		 * Flag bits indicating which extensions are supported.
		 * If r > 0, the base extension must also be supported/indicated,
		 * in order to maintain backwards compatibility.
		 */
		r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
		    KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = machine_has_esop();
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = test_facility(129);
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
		r = async_destroy && is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED_DUMP: {
		u64 pv_cmds_dump[] = {
			BIT_UVC_CMD_DUMP_INIT,
			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
			BIT_UVC_CMD_DUMP_CPU,
			BIT_UVC_CMD_DUMP_COMPLETE,
		};
		int i;

		r = is_prot_virt_host();

		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
			if (!test_bit_inv(pv_cmds_dump[i],
					  (unsigned long *)&uv_info.inst_calls_list)) {
				r = 0;
				break;
			}
		}
		break;
	}
	case KVM_CAP_S390_ZPCI_OP:
		r = kvm_s390_pci_interp_allowed();
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = test_facility(11);
		break;
	default:
		r = 0;
	}
	return r;
}
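/*
 * Userspace view (illustrative sketch, not part of this file): a VMM checks
 * these capabilities with the generic KVM_CHECK_EXTENSION ioctl on the VM
 * (or /dev/kvm) file descriptor, e.g.
 *
 *	int max = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
 *
 * where vm_fd is a hypothetical file descriptor from KVM_CREATE_VM; a
 * positive return value here is the MEM_OP_MAX_SIZE reported above and 0
 * means the capability is absent.
 */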
	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}
/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
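/*
 * Illustrative usage (not from this file): userspace fetches the log with
 * the generic KVM_GET_DIRTY_LOG ioctl, e.g.
 *
 *	struct kvm_dirty_log log = {
 *		.slot = 0,
 *		.dirty_bitmap = bitmap_buf,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * where bitmap_buf and vm_fd are hypothetical. The bitmap is copied out by
 * kvm_get_dirty_log() and, as above, cleared afterwards while slots_lock is
 * held.
 */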
int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (cpu_has_vx()) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			if (test_facility(198)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 198);
				set_kvm_facility(kvm->arch.model.fac_list, 198);
			}
			if (test_facility(199)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 199);
				set_kvm_facility(kvm->arch.model.fac_list, 199);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(11)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 11);
			set_kvm_facility(kvm->arch.model.fac_list, 11);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
			 r ? "(not available)" : "(success)");
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
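/*
 * Illustrative usage (not from this file): userspace opts into one of the
 * capabilities above with KVM_ENABLE_CAP on the VM file descriptor, e.g.
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 * where vm_fd is a hypothetical VM fd. Note that several of these caps
 * (vector registers, RI, GS, AIS, CPU topology, 1M hugepages) must be
 * enabled before the first VCPU is created, matching the
 * kvm->created_vcpus checks above.
 */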
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%p",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
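/*
 * Illustrative usage (not from this file): the memory attributes above are
 * driven through the KVM_SET_DEVICE_ATTR ioctl on the VM fd, e.g. to cap
 * guest memory at 16 GiB:
 *
 *	__u64 limit = 16ULL << 30;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
 *		.addr  = (__u64)&limit,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * where vm_fd is a hypothetical VM fd; get_user() above reads the limit
 * from attr->addr.
 */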
static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
{
	/* Only set the ECB bits after guest requests zPCI interpretation */
	if (!vcpu->kvm->arch.use_zpci_interp)
		return;

	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
}

void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
{
	/*
	 * If host is configured for PCI and the necessary facilities are
	 * available, turn on interpretation for the life of this guest
	 */
	kvm->arch.use_zpci_interp = 1;
/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int bkt;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;

	slots = kvm_memslots(kvm);
	if (!slots || kvm_memslots_empty(slots))
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	kvm_for_each_memslot(ms, bkt, slots) {
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}
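/*
 * Note (assumption based on the CMMA migration interface, not stated in
 * this excerpt): once migration mode is enabled with use_cmma set,
 * userspace can harvest the per-page CMMA values marked dirty above via
 * the KVM_S390_GET_CMMA_BITS VM ioctl, and kvm->arch.cmma_dirty_pages
 * gives it a progress estimate.
 */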
/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}
static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}
static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	/*
	 * For protected guests, the TOD is managed by the ultravisor, so trying
	 * to change it will never bring the expected results.
	 */
	if (kvm_s390_pv_is_protected(kvm)) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

out_unlock:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}
static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;
static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
		ret = kvm_s390_set_uv_feat(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
		ret = kvm_s390_get_processor_uv_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
		ret = kvm_s390_get_machine_uv_feat(kvm, attr);
		break;
	}
	return ret;
}
/**
 * kvm_s390_update_topology_change_report - update CPU topology change report
 * @kvm: guest KVM description
 * @val: set or clear the MTCR bit
 *
 * Updates the Multiprocessor Topology-Change-Report bit to signal
 * the guest with a topology change.
 * This is only relevant if the topology facility is present.
 *
 * The SCA version, bsca or esca, doesn't matter as offset is the same.
 */
static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
{
	union sca_utility new, old;
	struct bsca_block *sca;

	read_lock(&kvm->arch.sca_lock);
	sca = kvm->arch.sca;
	old = READ_ONCE(sca->utility);
	do {
		new = old;
		new.mtcr = val;
	} while (!try_cmpxchg(&sca->utility.val, &old.val, new.val));
	read_unlock(&kvm->arch.sca_lock);
}
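/*
 * Design note: the loop above is a standard lock-free read-modify-write.
 * try_cmpxchg() reloads 'old' with the current value on failure, so each
 * retry folds in concurrent updates to the utility word and only the MTCR
 * bit is changed. The SCA read lock guards against the SCA block itself
 * being replaced, not against concurrent updates of the utility word.
 */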
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	case KVM_S390_VM_CPU_TOPOLOGY:
		ret = kvm_s390_set_topo_change_indication(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	case KVM_S390_VM_CPU_TOPOLOGY:
		ret = kvm_s390_get_topo_change_indication(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
		case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	case KVM_S390_VM_CPU_TOPOLOGY:
		ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
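/*
 * Illustrative usage (not from this file): userspace reads storage keys via
 * the KVM_S390_GET_SKEYS VM ioctl, e.g.
 *
 *	__u8 keys[128];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = 128,
 *		.skeydata_addr = (__u64)keys,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *
 * where vm_fd is a hypothetical VM fd. A return value of
 * KVM_S390_GET_SKEYS_NONE indicates the guest does not use storage keys,
 * so there is nothing to transfer.
 */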
static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}