mmap_read_lock(vcpu->kvm->mm); /* * We checked for start >= end above, so lets check for the * fast path (no prefix swap page involved)
*/ if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
do_discard_gfn_range(vcpu, gpa_to_gfn(start), gpa_to_gfn(end));
} else { /* * This is slow path. gmap_discard will check for start * so lets split this into before prefix, prefix, after * prefix and let gmap_discard make some of these calls * NOPs.
*/
do_discard_gfn_range(vcpu, gpa_to_gfn(start), gpa_to_gfn(prefix)); if (start <= prefix)
do_discard_gfn_range(vcpu, 0, 1); if (end > prefix + PAGE_SIZE)
do_discard_gfn_range(vcpu, 1, 2);
do_discard_gfn_range(vcpu, gpa_to_gfn(prefix) + 2, gpa_to_gfn(end));
}
mmap_read_unlock(vcpu->kvm->mm); return 0;
}
switch (parm.subcode) { case 0: /* TOKEN */
VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx " "select mask 0x%llx compare mask 0x%llx",
parm.token_addr, parm.select_mask, parm.compare_mask); if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) { /* * If the pagefault handshake is already activated, * the token must not be changed. We have to return * decimal 8 instead, as mandated in SC24-6084.
*/
vcpu->run->s.regs.gprs[ry] = 8; return 0;
}
if (!kvm_is_gpa_in_memslot(vcpu->kvm, parm.token_addr)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu->arch.pfault_token = parm.token_addr;
vcpu->arch.pfault_select = parm.select_mask;
vcpu->arch.pfault_compare = parm.compare_mask;
vcpu->run->s.regs.gprs[ry] = 0;
rc = 0; break; case 1: /* * CANCEL * Specification allows to let already pending tokens survive * the cancel, therefore to reduce code complexity, we assume * all outstanding tokens are already pending.
*/
VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr); if (parm.token_addr || parm.select_mask ||
parm.compare_mask || parm.zarch) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu->run->s.regs.gprs[ry] = 0; /* * If the pfault handling was not established or is already * canceled SC24-6084 requests to return decimal 4.
*/ if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
vcpu->run->s.regs.gprs[ry] = 4; else
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
/* target host CPU already running */ if (!vcpu_is_preempted(tcpu_cpu)) goto no_yield;
smp_yield_cpu(tcpu_cpu);
VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: yield forwarded",
tid);
vcpu->stat.diag_9c_forward++; return 0;
}
if (kvm_vcpu_yield_to(tcpu) <= 0) goto no_yield;
VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: done", tid); return 0;
no_yield:
VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
vcpu->stat.diag_9c_ignored++; return 0;
}
/* * no need to check the return value of vcpu_stop as it can only have * an error for protvirt, but protvirt means user cpu state
*/ if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
kvm_s390_vcpu_stop(vcpu);
vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
vcpu->run->s390_reset_flags);
trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags); return -EREMOTE;
}
/*
 * Handle DIAGNOSE 0x500: virtio-ccw hypercall from the guest.
 *
 * The guest uses this diagnose to notify the host about activity on a
 * virtio-ccw virtqueue. Returns 0 on success, a negative error code on
 * failure; -EOPNOTSUPP tells the caller the diagnose was not handled
 * in the kernel (e.g. to be forwarded to userspace).
 *
 * Fix: "staticint" was missing the space between the storage class and
 * the return type, which does not compile.
 */
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->stat.instruction_diagnose_500++;
	/* No virtio-ccw notification? Get out quickly. */
	if (!vcpu->kvm->arch.css_support ||
	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
		return -EOPNOTSUPP;

	/*
	 * The layout is as follows:
	 * - gpr 2 contains the subchannel id (passed as addr)
	 * - gpr 3 contains the virtqueue index (passed as datamatch)
	 * - gpr 4 contains the index on the bus (optionally)
	 */
	ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
				      vcpu->run->s.regs.gprs[2] & 0xffffffff,
				      8, &vcpu->run->s.regs.gprs[3],
				      vcpu->run->s.regs.gprs[4]);

	/*
	 * Return cookie in gpr 2, but don't overwrite the register if the
	 * diagnose will be handled by userspace.
	 */
	if (ret != -EOPNOTSUPP)
		vcpu->run->s.regs.gprs[2] = ret;
	/* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
	return ret < 0 ? ret : 0;
}
/*
 * Top-level DIAGNOSE instruction handler: extract the function code and
 * dispatch to the matching sub-handler.
 *
 * Returns the sub-handler's result, or -EOPNOTSUPP for function codes
 * that are not handled in the kernel.
 */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
	int func_code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;

	/* DIAGNOSE is privileged: reject attempts from problem state. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	trace_kvm_s390_handle_diag(vcpu, func_code);

	switch (func_code) {
	case 0x10:
		return diag_release_pages(vcpu);
	case 0x44:
		return __diag_time_slice_end(vcpu);
	case 0x9c:
		return __diag_time_slice_end_directed(vcpu);
	case 0x258:
		return __diag_page_ref_service(vcpu);
	case 0x308:
		return __diag_ipl_functions(vcpu);
	case 0x500:
		return __diag_virtio_hypercall(vcpu);
	default:
		vcpu->stat.instruction_diagnose_other++;
		return -EOPNOTSUPP;
	}
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.0 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.