table_group = iommu_group_get_iommudata(grp); if (WARN_ON(!table_group)) continue;
list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) { for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { if (table_group->tables[i] != stit->tbl) continue;
long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, struct iommu_group *grp)
{ struct kvmppc_spapr_tce_table *stt = NULL; bool found = false; struct iommu_table *tbl = NULL; struct iommu_table_group *table_group; long i; struct kvmppc_spapr_tce_iommu_table *stit; CLASS(fd, f)(tablefd);
if (fd_empty(f)) return -EBADF;
rcu_read_lock();
list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { if (stt == fd_file(f)->private_data) {
found = true; break;
}
}
rcu_read_unlock();
if (!found) return -EINVAL;
table_group = iommu_group_get_iommudata(grp); if (WARN_ON(!table_group)) return -EFAULT;
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { struct iommu_table *tbltmp = table_group->tables[i];
if (!tbltmp) continue; /* Make sure hardware table parameters are compatible */ if ((tbltmp->it_page_shift <= stt->page_shift) &&
(tbltmp->it_offset << tbltmp->it_page_shift ==
stt->offset << stt->page_shift) &&
(tbltmp->it_size << tbltmp->it_page_shift >=
stt->size << stt->page_shift)) { /* * Reference the table to avoid races with * add/remove DMA windows.
*/
tbl = iommu_tce_table_get(tbltmp); break;
}
} if (!tbl) return -EINVAL;
rcu_read_lock();
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { if (tbl != stit->tbl) continue;
if (!kref_get_unless_zero(&stit->kref)) { /* stit is being destroyed */
iommu_tce_table_put(tbl);
rcu_read_unlock(); return -ENOTTY;
} /* * The table is already known to this KVM, we just increased * its KVM reference counter and can return.
*/
rcu_read_unlock(); return 0;
}
rcu_read_unlock();
/* * Handles TCE requests for emulated devices. * Puts guest TCE values to the table and expects user space to convert them. * Cannot fail so kvmppc_tce_validate must be called before it.
*/ staticvoid kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, unsignedlong idx, unsignedlong tce)
{ struct page *page;
u64 *tbl; unsignedlong sttpage;
if (!pua) /* it_userspace allocation might be delayed */ return H_TOO_HARD;
mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift); if (!mem) /* This only handles v2 IOMMU type, v1 is handled via ioctl() */ return H_TOO_HARD;
if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa))) return H_TOO_HARD;
if (mm_iommu_mapped_inc(mem)) return H_TOO_HARD;
ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir); if (WARN_ON_ONCE(ret)) {
mm_iommu_mapped_dec(mem); return H_TOO_HARD;
}
if (dir != DMA_NONE)
kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, unsignedlong liobn, unsignedlong ioba, unsignedlong tce_list, unsignedlong npages)
{ struct kvmppc_spapr_tce_table *stt; long i, ret = H_SUCCESS, idx; unsignedlong entry, ua = 0;
u64 __user *tces;
u64 tce; struct kvmppc_spapr_tce_iommu_table *stit;
stt = kvmppc_find_table(vcpu->kvm, liobn); if (!stt) return H_TOO_HARD;
entry = ioba >> stt->page_shift; /* * SPAPR spec says that the maximum size of the list is 512 TCEs * so the whole table fits in 4K page
*/ if (npages > 512) return H_PARAMETER;
if (tce_list & (SZ_4K - 1)) return H_PARAMETER;
ret = kvmppc_ioba_validate(stt, ioba, npages); if (ret != H_SUCCESS) return ret;
idx = srcu_read_lock(&vcpu->kvm->srcu); if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
ret = H_TOO_HARD; goto unlock_exit;
}
tces = (u64 __user *) ua;
for (i = 0; i < npages; ++i) { if (get_user(tce, tces + i)) {
ret = H_TOO_HARD; goto unlock_exit;
}
tce = be64_to_cpu(tce);
ret = kvmppc_tce_validate(stt, tce); if (ret != H_SUCCESS) goto unlock_exit;
}
for (i = 0; i < npages; ++i) { /* * This looks unsafe, because we validate, then regrab * the TCE from userspace which could have been changed by * another thread. * * But it actually is safe, because the relevant checks will be * re-executed in the following code. If userspace tries to * change this dodgily it will result in a messier failure mode * but won't threaten the host.
*/ if (get_user(tce, tces + i)) {
ret = H_TOO_HARD; goto unlock_exit;
}
tce = be64_to_cpu(tce);
if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
ret = H_PARAMETER; goto unlock_exit;
}
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry + i, ua,
iommu_tce_direction(tce));
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.