/* so directly declare capget to check caps without libcap */ int capget(cap_user_header_t header, cap_user_data_t data);
/** * In order to create user controlled virtual machines on S390, * check KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL * as privileged user (SYS_ADMIN).
*/ void require_ucontrol_admin(void)
{ struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3]; struct __user_cap_header_struct hdr = {
.version = _LINUX_CAPABILITY_VERSION_3,
}; int rc;
/* Test fixture: one user-controlled (ucontrol) VM with a single vCPU */
FIXTURE(uc_kvm)
{
	struct kvm_s390_sie_block *sie_block;	/* mmap'ed SIE control block of the vCPU */
	struct kvm_run *run;			/* mmap'ed kvm_run area of the vCPU */
	uintptr_t base_gpa;			/* guest-physical base of VM memory */
	uintptr_t code_gpa;			/* guest-physical address of the test code */
	uintptr_t base_hva;			/* host-virtual mapping of base_gpa */
	uintptr_t code_hva;			/* host-virtual mapping of code_gpa */
	int kvm_run_size;			/* size from KVM_GET_VCPU_MMAP_SIZE */
	vm_paddr_t pgd;				/* NOTE(review): presumably guest page-table origin — confirm */
	void *vm_mem;				/* backing host memory for the VM */
	int vcpu_fd;				/* vCPU file descriptor */
	int kvm_fd;				/* /dev/kvm file descriptor */
	int vm_fd;				/* VM file descriptor */
};
/** * create VM with single vcpu, map kvm_run and SIE control block for easy access
*/
FIXTURE_SETUP(uc_kvm)
{ struct kvm_s390_vm_cpu_processor info; int rc;
self->kvm_run_size = ioctl(self->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
ASSERT_GE(self->kvm_run_size, sizeof(struct kvm_run))
TH_LOG(KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, self->kvm_run_size));
self->run = (struct kvm_run *)mmap(NULL, self->kvm_run_size,
PROT_READ | PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
ASSERT_NE(self->run, MAP_FAILED); /** * For virtual cpus that have been created with S390 user controlled * virtual machines, the resulting vcpu fd can be memory mapped at page * offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of * the virtual cpu's hardware control block.
*/
self->sie_block = (struct kvm_s390_sie_block *)mmap(NULL, PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED,
self->vcpu_fd, KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);
ASSERT_NE(self->sie_block, MAP_FAILED);
TH_LOG("VM created %p %p", self->run, self->sie_block);
/** * Assert HPAGE CAP cannot be enabled on UCONTROL VM
*/
TEST(uc_cap_hpage)
{ int rc, kvm_fd, vm_fd, vcpu_fd; struct kvm_enable_cap cap = {
.cap = KVM_CAP_S390_HPAGE_1M,
};
/* assert hpages are not supported on ucontrol vm */
rc = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_HPAGE_1M);
EXPECT_EQ(0, rc);
/* Test that KVM_CAP_S390_HPAGE_1M can't be enabled for a ucontrol vm */
rc = ioctl(vm_fd, KVM_ENABLE_CAP, cap);
EXPECT_EQ(-1, rc);
EXPECT_EQ(EINVAL, errno);
/* assert HPAGE CAP is rejected after vCPU creation */
vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
ASSERT_GE(vcpu_fd, 0);
rc = ioctl(vm_fd, KVM_ENABLE_CAP, cap);
EXPECT_EQ(-1, rc);
EXPECT_EQ(EBUSY, errno);
/*
 * Enable storage-key handling for the guest
 * (clears KSS and the skey-instruction intercepts in the SIE block;
 * see the CPUSTAT_KSS / ICTL_ISKE assertions in the skey test below).
 * NOTE(review): the previous comment here was a copy-paste of the
 * SIEIC handler's comment and did not describe this function.
*/ staticvoid uc_skey_enable(FIXTURE_DATA(uc_kvm) *self)
{ struct kvm_s390_sie_block *sie_block = self->sie_block;
/*
 * Handle the SIEIC exit
 * - fail on codes not expected in the test cases
 * Returns true if interception is handled / execution can be continued
 */
static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;
	struct kvm_run *run = self->run;

	/* check SIE interception code */
	pr_info("sieic: 0x%.2x 0x%.4x 0x%.8x\n",
		run->s390_sieic.icptcode,
		run->s390_sieic.ipa,
		run->s390_sieic.ipb);
	switch (run->s390_sieic.icptcode) {
	case ICPT_INST:
		/* end execution in caller on intercepted instruction */
		pr_info("sie instruction interception\n");
		return uc_handle_insn_ic(self);
	case ICPT_KSS:
		uc_skey_enable(self);
		return true;
	case ICPT_OPEREXC:
		/* operation exception */
		TEST_FAIL("sie exception on %.4x%.8x", sie_block->ipa, sie_block->ipb);
		break;	/* TEST_FAIL() does not return; break keeps the switch well-formed */
	default:
		TEST_FAIL("UNEXPECTED SIEIC CODE %d", run->s390_sieic.icptcode);
	}
	return true;
}
/*
 * Verify VM state on exit.
 * Returns true if the exit was handled / execution can be continued,
 * otherwise the return value of the SIEIC handler.
 */
static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_run *run = self->run;

	switch (run->exit_reason) {
	case KVM_EXIT_S390_UCONTROL:
		/*
		 * check program interruption code
		 * handle page fault --> ucas map
		 */
		uc_handle_exit_ucontrol(self);
		break;
	case KVM_EXIT_S390_SIEIC:
		return uc_handle_sieic(self);
	default:
		pr_info("exit_reason %2d not handled\n", run->exit_reason);
	}
	return true;
}
/* run the VM until interrupted */ staticint uc_run_once(FIXTURE_DATA(uc_kvm) *self)
{ int rc;
/* copy test_mem_asm to code_hva / code_gpa */
TH_LOG("copy code %p to vm mapped memory %p / %p",
&test_mem_asm, (void *)self->code_hva, (void *)self->code_gpa);
memcpy((void *)self->code_hva, &test_mem_asm, PAGE_SIZE);
/* DAT disabled + 64 bit mode */
run->psw_mask = 0x0000000180000000ULL;
run->psw_addr = self->code_gpa;
/* set register content for test_mem_asm to access not mapped memory*/
sync_regs->gprs[1] = 0x55;
sync_regs->gprs[5] = self->base_gpa;
sync_regs->gprs[6] = VM_MEM_SIZE + disp;
run->kvm_dirty_regs |= KVM_SYNC_GPRS;
/* run and expect to fail with ucontrol pic segment translation */
ASSERT_EQ(0, uc_run_once(self));
ASSERT_EQ(1, sync_regs->gprs[0]);
ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
/* fail to map memory with not segment aligned address */
rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE + disp, VM_MEM_EXT_SIZE);
ASSERT_GT(0, rc)
TH_LOG("ucas map for non segment address should fail but didn't; " "result %d not expected, %s", rc, strerror(errno));
/* map / make additional memory available */
rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE);
ASSERT_EQ(0, rc)
TH_LOG("ucas map result %d not expected, %s", rc, strerror(errno));
ASSERT_EQ(0, uc_run_once(self));
ASSERT_EQ(false, uc_handle_exit(self));
uc_assert_diag44(self);
/* assert registers and memory are in expected state */
ASSERT_EQ(2, sync_regs->gprs[0]);
ASSERT_EQ(0x55, sync_regs->gprs[1]);
ASSERT_EQ(0x55, *(u32 *)gpa2hva(self, self->base_gpa + VM_MEM_SIZE + disp));
/* unmap and run loop again */
rc = uc_unmap_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE);
ASSERT_EQ(0, rc)
TH_LOG("ucas unmap result %d not expected, %s", rc, strerror(errno));
ASSERT_EQ(0, uc_run_once(self));
ASSERT_EQ(3, sync_regs->gprs[0]);
ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code); /* handle ucontrol exit and remap memory after previous map and unmap */
ASSERT_EQ(true, uc_handle_exit(self));
}
/* Set registers to values that are different from the ones that we expect below */ for (int i = 0; i < 8; i++)
sync_regs->gprs[i] = 8;
run->kvm_dirty_regs |= KVM_SYNC_GPRS;
/* copy test_gprs_asm to code_hva / code_gpa */
TH_LOG("copy code %p to vm mapped memory %p / %p",
&test_gprs_asm, (void *)self->code_hva, (void *)self->code_gpa);
memcpy((void *)self->code_hva, &test_gprs_asm, PAGE_SIZE);
/* DAT disabled + 64 bit mode */
run->psw_mask = 0x0000000180000000ULL;
run->psw_addr = self->code_gpa;
/* run and expect interception of diag 44 */
ASSERT_EQ(0, uc_run_once(self));
ASSERT_EQ(false, uc_handle_exit(self));
uc_assert_diag44(self);
/* Retrieve and check guest register values */
ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, ®s)); for (int i = 0; i < 8; i++) {
ASSERT_EQ(i, regs.gprs[i]);
ASSERT_EQ(i, sync_regs->gprs[i]);
}
/* run and expect interception of diag 44 again */
ASSERT_EQ(0, uc_run_once(self));
ASSERT_EQ(false, uc_handle_exit(self));
uc_assert_diag44(self);
/* check continued increment of register 0 value */
ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, ®s));
ASSERT_EQ(1, regs.gprs[0]);
ASSERT_EQ(1, sync_regs->gprs[0]);
}
/* * Bail out and skip the test after uc_skey_enable was executed but iske * is still intercepted. Instructions are not handled by the kernel. * Thus there is no need to test this here.
*/
TEST_ASSERT_EQ(0, sie_block->cpuflags & CPUSTAT_KSS);
TEST_ASSERT_EQ(0, sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE));
TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason);
TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode);
TEST_REQUIRE(sie_block->ipa != 0xb22b);