// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */
/*
 * NOTE(review): this chunk begins mid-function -- the enclosing definition
 * (presumably "static int aia_init(struct kvm *kvm)" with locals ret, i,
 * idx, base_ppn, vcpu, vaia, aia) starts before the visible region; confirm
 * against the full file before editing code here.
 *
 * Visible behavior: validates the user-configured AIA parameters (irqchip
 * not already initialized, no VCPU creation in flight, nr_sources <= nr_ids,
 * APLIC address set when sources exist), initializes the APLIC, then for
 * each VCPU checks that an IMSIC base address was set and that all IMSICs
 * share the same base PPN, derives the HART index from the IMSIC address,
 * and initializes the per-VCPU IMSIC. On failure it unwinds the IMSICs
 * already initialized and the APLIC; on success it sets the initialized
 * flag.
 */
/* Irqchip can be initialized only once */ if (kvm_riscv_aia_initialized(kvm)) return -EBUSY;
/* We might be in the middle of creating a VCPU? */ if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus)) return -EBUSY;
/* Number of sources should be less than or equals number of IDs */ if (aia->nr_ids < aia->nr_sources) return -EINVAL;
/* APLIC base is required for non-zero number of sources */ if (aia->nr_sources && aia->aplic_addr == KVM_RISCV_AIA_UNDEF_ADDR) return -EINVAL;
/* Initialize APLIC */
ret = kvm_riscv_aia_aplic_init(kvm); if (ret) return ret;
/* Iterate over each VCPU */
kvm_for_each_vcpu(idx, vcpu, kvm) {
vaia = &vcpu->arch.aia_context;
/* IMSIC base is required */ if (vaia->imsic_addr == KVM_RISCV_AIA_UNDEF_ADDR) {
ret = -EINVAL; goto fail_cleanup_imsics;
}
/* All IMSICs should have matching base PPN */ if (base_ppn == KVM_RISCV_AIA_UNDEF_ADDR)
base_ppn = aia_imsic_ppn(aia, vaia->imsic_addr); if (base_ppn != aia_imsic_ppn(aia, vaia->imsic_addr)) {
ret = -EINVAL; goto fail_cleanup_imsics;
}
/* Update HART index of the IMSIC based on IMSIC base */
vaia->hart_index = aia_imsic_hart_index(aia,
vaia->imsic_addr);
/* Initialize IMSIC for this VCPU */
ret = kvm_riscv_vcpu_aia_imsic_init(vcpu); if (ret) goto fail_cleanup_imsics;
}
/* Set the initialized flag */
kvm->arch.aia.initialized = true;
return 0;
/* Unwind: clean up IMSICs of VCPUs initialized so far, then the APLIC */
fail_cleanup_imsics: for (i = idx - 1; i >= 0; i--) {
vcpu = kvm_get_vcpu(kvm, i); if (!vcpu) continue;
kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
}
kvm_riscv_aia_aplic_cleanup(kvm); return ret;
}
staticint aia_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
u32 nr;
u64 addr; int nr_vcpus, r = -ENXIO; unsignedlong v, type = (unsignedlong)attr->attr; void __user *uaddr = (void __user *)(long)attr->addr;
switch (attr->group) { case KVM_DEV_RISCV_AIA_GRP_CONFIG: if (copy_from_user(&nr, uaddr, sizeof(nr))) return -EFAULT;
mutex_lock(&dev->kvm->lock);
r = aia_config(dev->kvm, type, &nr, true);
mutex_unlock(&dev->kvm->lock);
break;
case KVM_DEV_RISCV_AIA_GRP_ADDR: if (copy_from_user(&addr, uaddr, sizeof(addr))) return -EFAULT;
nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
mutex_lock(&dev->kvm->lock); if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
r = aia_aplic_addr(dev->kvm, &addr, true); elseif (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
r = aia_imsic_addr(dev->kvm, &addr,
type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), true);
mutex_unlock(&dev->kvm->lock);
break;
case KVM_DEV_RISCV_AIA_GRP_CTRL: switch (type) { case KVM_DEV_RISCV_AIA_CTRL_INIT:
mutex_lock(&dev->kvm->lock);
r = aia_init(dev->kvm);
mutex_unlock(&dev->kvm->lock); break;
}
break; case KVM_DEV_RISCV_AIA_GRP_APLIC: if (copy_from_user(&nr, uaddr, sizeof(nr))) return -EFAULT;
mutex_lock(&dev->kvm->lock);
r = kvm_riscv_aia_aplic_set_attr(dev->kvm, type, nr);
mutex_unlock(&dev->kvm->lock);
break; case KVM_DEV_RISCV_AIA_GRP_IMSIC: if (copy_from_user(&v, uaddr, sizeof(v))) return -EFAULT;
mutex_lock(&dev->kvm->lock);
r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, true, &v);
mutex_unlock(&dev->kvm->lock);
break;
}
return r;
}
staticint aia_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
u32 nr;
u64 addr; int nr_vcpus, r = -ENXIO; void __user *uaddr = (void __user *)(long)attr->addr; unsignedlong v, type = (unsignedlong)attr->attr;
switch (attr->group) { case KVM_DEV_RISCV_AIA_GRP_CONFIG: if (copy_from_user(&nr, uaddr, sizeof(nr))) return -EFAULT;
mutex_lock(&dev->kvm->lock);
r = aia_config(dev->kvm, type, &nr, false);
mutex_unlock(&dev->kvm->lock); if (r) return r;
if (copy_to_user(uaddr, &nr, sizeof(nr))) return -EFAULT;
break; case KVM_DEV_RISCV_AIA_GRP_ADDR: if (copy_from_user(&addr, uaddr, sizeof(addr))) return -EFAULT;
nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
mutex_lock(&dev->kvm->lock); if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
r = aia_aplic_addr(dev->kvm, &addr, false); elseif (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
r = aia_imsic_addr(dev->kvm, &addr,
type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), false);
mutex_unlock(&dev->kvm->lock); if (r) return r;
if (copy_to_user(uaddr, &addr, sizeof(addr))) return -EFAULT;
break; case KVM_DEV_RISCV_AIA_GRP_APLIC: if (copy_from_user(&nr, uaddr, sizeof(nr))) return -EFAULT;
mutex_lock(&dev->kvm->lock);
r = kvm_riscv_aia_aplic_get_attr(dev->kvm, type, &nr);
mutex_unlock(&dev->kvm->lock); if (r) return r;
if (copy_to_user(uaddr, &nr, sizeof(nr))) return -EFAULT;
break; case KVM_DEV_RISCV_AIA_GRP_IMSIC: if (copy_from_user(&v, uaddr, sizeof(v))) return -EFAULT;
mutex_lock(&dev->kvm->lock);
r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, false, &v);
mutex_unlock(&dev->kvm->lock); if (r) return r;
if (copy_to_user(uaddr, &v, sizeof(v))) return -EFAULT;
break;
}
return r;
}
staticint aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{ int nr_vcpus;
switch (attr->group) { case KVM_DEV_RISCV_AIA_GRP_CONFIG: switch (attr->attr) { case KVM_DEV_RISCV_AIA_CONFIG_MODE: case KVM_DEV_RISCV_AIA_CONFIG_IDS: case KVM_DEV_RISCV_AIA_CONFIG_SRCS: case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS: case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT: case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS: case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS: return 0;
} break; case KVM_DEV_RISCV_AIA_GRP_ADDR:
nr_vcpus = atomic_read(&dev->kvm->online_vcpus); if (attr->attr == KVM_DEV_RISCV_AIA_ADDR_APLIC) return 0; elseif (attr->attr < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus)) return 0; break; case KVM_DEV_RISCV_AIA_GRP_CTRL: switch (attr->attr) { case KVM_DEV_RISCV_AIA_CTRL_INIT: return 0;
} break; case KVM_DEV_RISCV_AIA_GRP_APLIC: return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr); case KVM_DEV_RISCV_AIA_GRP_IMSIC: return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
}
/*
 * kvm_riscv_vcpu_aia_update() - Refresh per-VCPU AIA state before guest run.
 *
 * Returns 1 (continue guest entry) when the AIA device has not been
 * initialized by user-space yet; otherwise forwards to the IMSIC update
 * helper and returns its result.
 */
int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
{
	/* Nothing to update until user-space has initialized the AIA */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return 1;

	/* Push the VCPU's IMSIC state out before entering guest mode */
	return kvm_riscv_vcpu_aia_imsic_update(vcpu);
}
/*
 * We don't do any memory allocations over here because these
 * will be done after AIA device is initialized by the user-space.
 *
 * Refer, aia_init() implementation for more details.
 */
/*
 * kvm_riscv_vcpu_aia_deinit() - Per-VCPU AIA teardown; no-op unless the
 * AIA device was initialized by user-space.
 *
 * NOTE(review): the function body appears truncated in this chunk -- the
 * closing brace (and any cleanup call after the early-return guard) lies
 * outside the visible region. Confirm against the full file before editing.
 */
void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
{ /* Proceed only if AIA was initialized successfully */ if (!kvm_riscv_aia_initialized(vcpu->kvm)) return;
/*
 * kvm_riscv_aia_inject_irq() - Inject a wired interrupt into the VM;
 * returns -EBUSY when the AIA device has not been initialized yet.
 *
 * NOTE(review): "unsignedint" in the parameter list is a mangled
 * "unsigned int" (compile error) -- fix once the full definition is in
 * view. The function body also appears truncated in this chunk: only the
 * initialization guard and a stray comment are visible, with no closing
 * brace. Confirm against the full file before editing.
 */
int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsignedint irq, bool level)
{ /* Proceed only if AIA was initialized successfully */ if (!kvm_riscv_aia_initialized(kvm)) return -EBUSY;
/* * We don't do any memory allocations over here because these * will be done after AIA device is initialized by the user-space. * * Refer, aia_init() implementation for more details.
*/
[Extraneous text, apparently web-page residue from the extraction source — not part of the kernel file; translated from German:]
The information on this web page has been compiled carefully and to the
best of our knowledge. However, neither completeness, nor correctness,
nor quality of the provided information is guaranteed.
Note:
The colored syntax highlighting and the measurement are still experimental.