/* When running a PAPR guest, SDR1 contains a HVA address instead
of a GPA */ if (vcpu->arch.papr_enabled)
r = pteg; else
r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
if (kvm_is_error_hva(r)) return r; return r | (pteg & ~PAGE_MASK);
}
/*
 * Compute the Abbreviated Virtual Page Number (AVPN) for a guest
 * effective address, positioned as it appears in the first doubleword
 * of an HPTE.
 *
 * @slbe:  guest SLB entry covering @eaddr (supplies VSID and page size)
 * @eaddr: guest effective address being translated
 *
 * Returns the AVPN shifted into HPTE dword-0 alignment.
 */
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	u64 avpn;

	/*
	 * BUG FIX: the original read 'avpn' uninitialized (undefined
	 * behavior).  Build the VPN from the page index within the
	 * segment combined with the VSID, then align it to the AVPN
	 * field position.
	 */
	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);

	/* Shift into AVPN position: right for pages < 64k, left otherwise */
	if (p < 16)
		avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
	else
		avpn <<= p - 16;

	return avpn;
}
/*
 * Return the page size encoded in the second word of a HPTE, or
 * -1 for an invalid encoding for the base page size indicated by
 * the SLB entry.  This doesn't handle mixed pagesize segments yet.
 *
 * @slbe: guest SLB entry giving the segment's base page size
 * @r:    second doubleword (pte1) of the HPTE
 */
static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
{
	/* BUG FIX: original read "staticint" (missing space) and did not compile */
	switch (slbe->base_page_size) {
	case MMU_PAGE_64K:
		/* LP bits 0b0001 in the low pfn field select 64k */
		if ((r & 0xf000) == 0x1000)
			return MMU_PAGE_64K;
		break;
	case MMU_PAGE_16M:
		/* a 16M HPTE has the low 8 pfn-field bits clear */
		if ((r & 0xff000) == 0)
			return MMU_PAGE_16M;
		break;
	}
	return -1;
}
/* Check all relevant fields of 1st dword */ if ((pte0 & v_mask) == v_val) { /* If large page bit is set, check pgsize encoding */ if (slbe->large &&
(vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
pgsize = decode_pagesize(slbe, pte1); if (pgsize < 0) continue;
}
found = true; break;
}
}
if (!found) { if (second) goto no_page_found;
v_val |= HPTE_V_SECONDARY;
second = true; goto do_second;
}
r = be64_to_cpu(pteg[i+1]);
pp = (r & HPTE_R_PP) | key; if (r & HPTE_R_PP0)
pp |= 8;
/* Update PTE R and C bits, so the guest's swapper knows we used the
* page */ if (gpte->may_read && !(r & HPTE_R_R)) { /* * Set the accessed flag. * We have to write this back with a single byte write * because another vcpu may be accessing this on * non-PAPR platforms such as mac99, and this is * what real hardware does.
*/ char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
r |= HPTE_R_R;
put_user(r >> 8, addr + 6);
} if (iswrite && gpte->may_write && !(r & HPTE_R_C)) { /* Set the dirty flag */ /* Use a single byte write */ char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
r |= HPTE_R_C;
put_user(r, addr + 7);
}
mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
if (!gpte->may_read || (iswrite && !gpte->may_write)) return -EPERM; return 0;
/* * According to Book3 2.01 mtsrin is implemented as: * * The SLB entry specified by (RB)32:35 is loaded from register * RS, as follows. * * SLBE Bit Source SLB Field * * 0:31 0x0000_0000 ESID-0:31 * 32:35 (RB)32:35 ESID-32:35 * 36 0b1 V * 37:61 0x00_0000|| 0b0 VSID-0:24 * 62:88 (RS)37:63 VSID-25:51 * 89:91 (RS)33:35 Ks Kp N * 92 (RS)36 L ((RS)36 must be 0b0) * 93 0b0 C
*/
/* * The tlbie instruction changed behaviour starting with * POWER6. POWER6 and later don't have the large page flag * in the instruction but in the RB value, along with bits * indicating page and segment sizes.
*/ if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) { /* POWER6 or later */ if (va & 1) { /* L bit */ if ((va & 0xf000) == 0x1000)
mask = 0xFFFFFFFF0ULL; /* 64k page */ else
mask = 0xFFFFFF000ULL; /* 16M page */
}
} else { /* older processors, e.g. PPC970 */ if (large)
mask = 0xFFFFFF000ULL;
} /* flush this VA on all vcpus */
kvm_for_each_vcpu(i, v, vcpu->kvm)
kvmppc_mmu_pte_vflush(v, va >> 12, mask);
}
switch (msr & (MSR_DR|MSR_IR)) { case 0:
gvsid = VSID_REAL | esid; break; case MSR_IR:
gvsid |= VSID_REAL_IR; break; case MSR_DR:
gvsid |= VSID_REAL_DR; break; case MSR_DR|MSR_IR: if (!slb) goto no_slb;
break; default:
BUG(); break;
}
#ifdef CONFIG_PPC_64K_PAGES /* * Mark this as a 64k segment if the host is using * 64k pages, the host MMU supports 64k pages and * the guest segment page size is >= 64k, * but not if this segment contains the magic page.
*/ if (pagesize >= MMU_PAGE_64K &&
mmu_psize_defs[MMU_PAGE_64K].shift &&
!segment_contains_magic_page(vcpu, esid))
gvsid |= VSID_64K; #endif
if (kvmppc_get_msr(vcpu) & MSR_PR)
gvsid |= VSID_PR;
/*
 * NOTE(review): the text below is a website disclaimer that leaked into
 * this file during extraction; it is not part of the source.  Translated
 * from German: "The information on this website has been carefully
 * compiled to the best of our knowledge.  However, neither completeness,
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */