/*
 * NOTE(review): this span is a splice of the TAILS of three SIGP helper
 * functions whose headers are missing from this chunk.  Judging by the
 * VCPU_EVENT message strings these appear to be
 * __sigp_stop_and_store_status (injects a stop-and-store-status irq),
 * __sigp_set_prefix (validates the new prefix address), and
 * __sigp_sense_running — TODO confirm against the full file.
 * Only comments are added here; the code is left byte-identical.
 */
/* NOTE(review): "elseif" below looks like a transcription artifact of "else if". */
rc = kvm_s390_inject_vcpu(dst_vcpu, &irq); if (rc == -EBUSY)
rc = SIGP_CC_BUSY; elseif (rc == 0)
VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
dst_vcpu->vcpu_id);
/* * Make sure the new value is valid memory. We only need to check the * first page, since address is 8k aligned and memory pieces are always * at least 1MB aligned and have at least a size of 1MB.
*/ if (!kvm_is_gpa_in_memslot(vcpu->kvm, irq.u.prefix.address)) {
/* Invalid prefix: store SIGP_STATUS_INVALID_PARAMETER in the low word
 * of the status register and report condition code "status stored". */
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INVALID_PARAMETER; return SIGP_CC_STATUS_STORED;
}
VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
dst_vcpu->vcpu_id, rc);
return rc;
}
/*
 * Prepare handling of a SIGP START or SIGP RESTART order.
 *
 * These orders are completed by the userspace hypervisor, so the normal
 * result is -EOPNOTSUPP ("hand off to user space").  If a STOP interrupt
 * is still pending on the destination VCPU, the order is rejected with
 * SIGP_CC_BUSY instead; the lock on the destination's local interrupt
 * state makes sure we do not race with STOP irq injection.
 *
 * Fixes: "staticint" (missing whitespace) did not compile.
 */
static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	/* handle (RE)START in user space */
	int rc = -EOPNOTSUPP;

	/* make sure we don't race with STOP irq injection */
	spin_lock(&li->lock);
	if (kvm_s390_is_stop_irq_pending(dst_vcpu))
		rc = SIGP_CC_BUSY;
	spin_unlock(&li->lock);

	return rc;
}
/*
 * SIGP (INITIAL) CPU RESET is handled entirely in user space; signal
 * that to the caller with -EOPNOTSUPP.
 *
 * Fixes: "staticint" (missing whitespace) did not compile.
 */
static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
				    struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	/* handle (INITIAL) CPU RESET in user space */
	return -EOPNOTSUPP;
}
/*
 * Unknown SIGP orders are handled in user space; signal that to the
 * caller with -EOPNOTSUPP.
 *
 * Fixes: "staticint" (missing whitespace) did not compile.
 */
static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
				  struct kvm_vcpu *dst_vcpu)
{
	/* handle unknown orders in user space */
	return -EOPNOTSUPP;
}
/*
 * NOTE(review): interior of a SIGP order dispatch function — presumably
 * handle_sigp_dst(); its signature and local declarations (rc, parameter,
 * status_reg, dst_vcpu) are missing from this chunk, so only comments are
 * added here and the code is left byte-identical.
 */
/* * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders * are processed asynchronously. Until the affected VCPU finishes * its work and calls back into KVM to clear the (RESTART or STOP) * interrupt, we need to return any new non-reset orders "busy". * * This is important because a single VCPU could issue: * 1) SIGP STOP $DESTINATION * 2) SIGP SENSE $DESTINATION * * If the SIGP SENSE would not be rejected as "busy", it could * return an incorrect answer as to whether the VCPU is STOPPED * or OPERATING.
*/ if (order_code != SIGP_INITIAL_CPU_RESET &&
order_code != SIGP_CPU_RESET) { /* * Lockless check. Both SIGP STOP and SIGP (RE)START * properly synchronize everything while processing * their orders, while the guest cannot observe a * difference when issuing other orders from two * different VCPUs.
*/ if (kvm_s390_is_stop_irq_pending(dst_vcpu) ||
kvm_s390_is_restart_irq_pending(dst_vcpu)) return SIGP_CC_BUSY;
}
/* Dispatch: bump the per-order stat counter, then run the matching
 * __sigp_* / __prepare_sigp_* handler. */
switch (order_code) { case SIGP_SENSE:
vcpu->stat.instruction_sigp_sense++;
rc = __sigp_sense(vcpu, dst_vcpu, status_reg); break; case SIGP_EXTERNAL_CALL:
vcpu->stat.instruction_sigp_external_call++;
rc = __sigp_external_call(vcpu, dst_vcpu, status_reg); break; case SIGP_EMERGENCY_SIGNAL:
vcpu->stat.instruction_sigp_emergency++;
rc = __sigp_emergency(vcpu, dst_vcpu); break; case SIGP_STOP:
vcpu->stat.instruction_sigp_stop++;
rc = __sigp_stop(vcpu, dst_vcpu); break; case SIGP_STOP_AND_STORE_STATUS:
vcpu->stat.instruction_sigp_stop_store_status++;
rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg); break; case SIGP_STORE_STATUS_AT_ADDRESS:
vcpu->stat.instruction_sigp_store_status++;
rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
status_reg); break; case SIGP_SET_PREFIX:
vcpu->stat.instruction_sigp_prefix++;
rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg); break; case SIGP_COND_EMERGENCY_SIGNAL:
vcpu->stat.instruction_sigp_cond_emergency++;
rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
status_reg); break; case SIGP_SENSE_RUNNING:
vcpu->stat.instruction_sigp_sense_running++;
rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg); break; case SIGP_START:
vcpu->stat.instruction_sigp_start++;
rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code); break; case SIGP_RESTART:
vcpu->stat.instruction_sigp_restart++;
rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code); break; case SIGP_INITIAL_CPU_RESET:
vcpu->stat.instruction_sigp_init_cpu_reset++;
rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code); break; case SIGP_CPU_RESET:
vcpu->stat.instruction_sigp_cpu_reset++;
rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code); break; default:
vcpu->stat.instruction_sigp_unknown++;
rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
}
/* -EOPNOTSUPP from a handler means the order is completed in user space. */
if (rc == -EOPNOTSUPP)
VCPU_EVENT(vcpu, 4, "sigp order %u -> cpu %x: handled in user space",
order_code, dst_vcpu->vcpu_id);
/*
 * NOTE(review): the function's "return rc;" and closing brace appear to
 * have been lost in extraction — the next line jumps into an unrelated
 * switch that belongs to a different function.
 */
/*
 * NOTE(review): tail of a second function whose header is missing from
 * this chunk — presumably handle_sigp_order_in_user_space(vcpu,
 * order_code, cpu_addr), given that it uses cpu_addr and returns 0
 * ("handle in kernel") or 1 ("drop to user space") — TODO confirm.
 * For orders dropped to user space it updates the stat counters here,
 * since the in-kernel dispatch path never runs for them.
 */
switch (order_code) { case SIGP_SENSE: case SIGP_EXTERNAL_CALL: case SIGP_EMERGENCY_SIGNAL: case SIGP_COND_EMERGENCY_SIGNAL: case SIGP_SENSE_RUNNING: return 0; /* update counters as we're directly dropping to user space */ case SIGP_STOP:
vcpu->stat.instruction_sigp_stop++; break; case SIGP_STOP_AND_STORE_STATUS:
vcpu->stat.instruction_sigp_stop_store_status++; break; case SIGP_STORE_STATUS_AT_ADDRESS:
vcpu->stat.instruction_sigp_store_status++; break; case SIGP_STORE_ADDITIONAL_STATUS:
vcpu->stat.instruction_sigp_store_adtl_status++; break; case SIGP_SET_PREFIX:
vcpu->stat.instruction_sigp_prefix++; break; case SIGP_START:
vcpu->stat.instruction_sigp_start++; break; case SIGP_RESTART:
vcpu->stat.instruction_sigp_restart++; break; case SIGP_INITIAL_CPU_RESET:
vcpu->stat.instruction_sigp_init_cpu_reset++; break; case SIGP_CPU_RESET:
vcpu->stat.instruction_sigp_cpu_reset++; break; default:
vcpu->stat.instruction_sigp_unknown++;
}
VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
order_code, cpu_addr);
return 1;
}
/*
 * Top-level handler for an intercepted SIGP instruction.
 *
 * Decodes the register operands (r1/r3 from the instruction), rejects
 * SIGP issued from the problem state with a privileged-operation
 * program interrupt, and bails out with -EOPNOTSUPP when the order is
 * to be completed in user space.
 *
 * NOTE(review): the function body is TRUNCATED in this chunk — the
 * remaining decode/dispatch and the closing brace are missing, so only
 * comments are added here.
 */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{ int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int r3 = vcpu->arch.sie_block->ipa & 0x000f;
u32 parameter;
u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
u8 order_code; int rc;
/* sigp in userspace can exit */ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr)) return -EOPNOTSUPP;
/* * Handle SIGP partial execution interception. * * This interception will occur at the source cpu when a source cpu sends an * external call to a target cpu and the target cpu has the WAIT bit set in * its cpuflags. Interception will occur after the interrupt indicator bits at * the target cpu have been set. All error cases will lead to instruction * interception, therefore nothing is to be checked or prepared.
*/ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{ int r3 = vcpu->arch.sie_block->ipa & 0x000f;
u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; struct kvm_vcpu *dest_vcpu;
u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
/* Only SIGP EXTERNAL CALL is expected here (see block comment above). */
if (order_code == SIGP_EXTERNAL_CALL) {
trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
/*
 * NOTE(review): the function body is TRUNCATED in this chunk — the
 * remainder (destination VCPU lookup and interrupt injection) and the
 * closing braces are missing; the following lines are non-code
 * extraction residue from a web page, not part of the source file.
 */
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.