staticinlineunsignedlong ___tlbie(unsignedlong vpn, int psize, int apsize, int ssize)
{ unsignedlong va; unsignedint penc; unsignedlong sllp;
/* * We need 14 to 65 bits of va for a tlibe of 4K page * With vpn we ignore the lower VPN_SHIFT bits already. * And top two bits are already ignored because we can * only accomodate 76 bits in a 64 bit vpn with a VPN_SHIFT * of 12.
*/
va = vpn << VPN_SHIFT; /* * clear top 16 bits of 64bit va, non SLS segment * Older versions of the architecture (2.02 and earler) require the * masking of the top 16 bits.
*/ if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
va &= ~(0xffffULL << 48);
switch (psize) { case MMU_PAGE_4K: /* clear out bits after (52) [0....52.....63] */
va &= ~((1ul << (64 - 52)) - 1);
va |= ssize << 8;
sllp = get_sllp_encoding(apsize);
va |= sllp << 5; asmvolatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
: "memory"); break; default: /* We need 14 to 14 + i bits of va */
penc = mmu_psize_defs[psize].penc[apsize];
va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
va |= penc << 12;
va |= ssize << 8; /* * AVAL bits: * We don't need all the bits, but rest of the bits * must be ignored by the processor. * vpn cover upto 65 bits of va. (0...65) and we need * 58..64 bits of va.
*/
va |= (vpn & 0xfe); /* AVAL */
va |= 1; /* L */ asmvolatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
: "memory"); break;
} return va;
}
staticinlinevoid fixup_tlbie_vpn(unsignedlong vpn, int psize, int apsize, int ssize)
{ if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { /* Radix flush for a hash guest */
/* * Need the extra ptesync to make sure we don't * re-order the tlbie
*/ asmvolatile("ptesync": : :"memory"); asmvolatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
}
if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { /* Need the extra ptesync to ensure we don't reorder tlbie*/ asmvolatile("ptesync": : :"memory");
___tlbie(vpn, psize, apsize, ssize);
}
}
/*
 * Global TLB invalidate of one page, with tracing.
 *
 * NOTE(review): the body of this function was truncated in this copy of
 * the file (only the signature and the "rb" declaration survived).
 * Restored to the obvious wrapper: issue the tlbie via ___tlbie() and
 * trace the RB value it returns.  Confirm against upstream.
 */
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long rb;

	rb = ___tlbie(vpn, psize, apsize, ssize);
	trace_tlbie(0, 0, rb, 0, 0, 0, 0);
}
staticinlinevoid __tlbiel(unsignedlong vpn, int psize, int apsize, int ssize)
{ unsignedlong va; unsignedint penc; unsignedlong sllp;
/* VPN_SHIFT can be atmost 12 */
va = vpn << VPN_SHIFT; /* * clear top 16 bits of 64 bit va, non SLS segment * Older versions of the architecture (2.02 and earler) require the * masking of the top 16 bits.
*/ if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
va &= ~(0xffffULL << 48);
switch (psize) { case MMU_PAGE_4K: /* clear out bits after(52) [0....52.....63] */
va &= ~((1ul << (64 - 52)) - 1);
va |= ssize << 8;
sllp = get_sllp_encoding(apsize);
va |= sllp << 5; asmvolatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 0), %1)
: : "r" (va), "i" (CPU_FTR_ARCH_206)
: "memory"); break; default: /* We need 14 to 14 + i bits of va */
penc = mmu_psize_defs[psize].penc[apsize];
va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
va |= penc << 12;
va |= ssize << 8; /* * AVAL bits: * We don't need all the bits, but rest of the bits * must be ignored by the processor. * vpn cover upto 65 bits of va. (0...65) and we need * 58..64 bits of va.
*/
va |= (vpn & 0xfe);
va |= 1; /* L */ asmvolatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 1), %1)
: : "r" (va), "i" (CPU_FTR_ARCH_206)
: "memory"); break;
}
trace_tlbie(0, 1, va, 0, 0, 0, 0);
}
/*
 * NOTE(review): this region is garbled by the extraction.  The tlbie()
 * wrapper below has lost its entire body (lock handling, the
 * __tlbie/__tlbiel dispatch and the tlbsync sequence), and everything
 * from the for-loop onward is the tail of a *different* function — it
 * matches the slot-search/write epilogue of an HPTE insert routine:
 * i, hptep, flags, hpte_r, hpte_v and vflags are all undeclared here.
 * Left byte-identical; restore both functions from the upstream source
 * before attempting to build this file.
 */
staticinlinevoid tlbie(unsignedlong vpn, int psize, int apsize, int ssize, int local)
{ unsignedint use_local = local && mmu_has_feature(MMU_FTR_TLBIEL); int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
/* NOTE(review): orphaned insert-epilogue fragment begins here. */
for (i = 0; i < HPTES_PER_GROUP; i++) { if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) { /* retry with lock held */
native_lock_hpte(hptep); if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) break;
native_unlock_hpte(hptep);
}
hptep++;
}
if (i == HPTES_PER_GROUP) {
local_irq_restore(flags); return -1;
}
hptep->r = cpu_to_be64(hpte_r); /* Guarantee the second dword is visible before the valid bit */
eieio(); /* * Now set the first dword including the valid bit * NOTE: this also unlocks the hpte
*/
release_hpte_lock();
hptep->v = cpu_to_be64(hpte_v);
__asm__ __volatile__ ("ptesync" : : : "memory");
local_irq_restore(flags);
return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
staticlong native_hpte_remove(unsignedlong hpte_group)
{ unsignedlong hpte_v, flags; struct hash_pte *hptep; int i; int slot_offset;
local_irq_save(flags);
DBG_LOW(" remove(group=%lx)\n", hpte_group);
/* pick a random entry to start at */
slot_offset = mftb() & 0x7;
for (i = 0; i < HPTES_PER_GROUP; i++) {
hptep = htab_address + hpte_group + slot_offset;
hpte_v = be64_to_cpu(hptep->v);
if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) { /* retry with lock held */
native_lock_hpte(hptep);
hpte_v = be64_to_cpu(hptep->v); if ((hpte_v & HPTE_V_VALID)
&& !(hpte_v & HPTE_V_BOLTED)) break;
native_unlock_hpte(hptep);
}
slot_offset++;
slot_offset &= 0x7;
}
if (i == HPTES_PER_GROUP) {
i = -1; goto out;
}
/* Invalidate the hpte. NOTE: this also unlocks it */
release_hpte_lock();
hptep->v = 0;
out:
local_irq_restore(flags); return i;
}
/*
 * NOTE(review): this region is garbled by the extraction.
 * native_hpte_updatepp() below has lost its prologue (want_v is tested
 * by HPTE_V_COMPARE() before ever being assigned, and irqflags is
 * declared but never saved/restored) and its "return ret" epilogue.
 * From the "We try to keep bolted entries..." comment onward the code
 * is the tail of a *different* function — the primary/secondary group
 * search of an hpte-find routine: hash and hpte_group are undeclared
 * here.  Left byte-identical; restore both functions from the upstream
 * source before attempting to build this file.
 */
staticlong native_hpte_updatepp(unsignedlong slot, unsignedlong newpp, unsignedlong vpn, int bpsize, int apsize, int ssize, unsignedlong flags)
{ struct hash_pte *hptep = htab_address + slot; unsignedlong hpte_v, want_v; int ret = 0, local = 0; unsignedlong irqflags;
hpte_v = hpte_get_old_v(hptep); /* * We need to invalidate the TLB always because hpte_remove doesn't do * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less * random entry from it. When we do that we don't invalidate the TLB * (hpte_remove) because we assume the old translation is still * technically "valid".
*/ if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
DBG_LOW(" -> miss\n");
ret = -1;
} else {
native_lock_hpte(hptep); /* recheck with locks held */
hpte_v = hpte_get_old_v(hptep); if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
!(hpte_v & HPTE_V_VALID))) {
ret = -1;
} else {
DBG_LOW(" -> hit\n"); /* Update the HPTE */
hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
~(HPTE_R_PPP | HPTE_R_N)) |
(newpp & (HPTE_R_PPP | HPTE_R_N |
HPTE_R_C)));
}
native_unlock_hpte(hptep);
}
if (flags & HPTE_LOCAL_UPDATE)
local = 1; /* * Ensure it is out of the tlb too if it is not a nohpte fault
*/ if (!(flags & HPTE_NOHPTE_UPDATE))
tlbie(vpn, bpsize, apsize, ssize, local);
/* NOTE(review): orphaned hpte-find fragment begins here. */
/* * We try to keep bolted entries always in primary hash * But in some case we can find them in secondary too.
*/
hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = __native_hpte_find(want_v, hpte_group); if (slot < 0) { /* Try in secondary */
hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = __native_hpte_find(want_v, hpte_group); if (slot < 0) return -1;
}
return slot;
}
/* * Update the page protection bits. Intended to be used to create * guard pages for kernel data structures on pages which are bolted * in the HPT. Assumes pages being operated on will not be stolen. * * No need to lock here because we should be the only user.
*/ staticvoid native_hpte_updateboltedpp(unsignedlong newpp, unsignedlong ea, int psize, int ssize)
{ unsignedlong vpn; unsignedlong vsid; long slot; struct hash_pte *hptep; unsignedlong flags;
slot = native_hpte_find(vpn, psize, ssize); if (slot == -1)
panic("could not find page to bolt\n");
hptep = htab_address + slot;
/* Update the HPTE */
hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
~(HPTE_R_PPP | HPTE_R_N)) |
(newpp & (HPTE_R_PPP | HPTE_R_N))); /* * Ensure it is out of the tlb too. Bolted entries base and * actual page size will be same.
*/
tlbie(vpn, psize, psize, ssize, 0);
local_irq_restore(flags);
}
/*
 * NOTE(review): this region is garbled by the extraction.
 * native_hpte_removebolted() below keeps only its comment, signature and
 * local declarations; the body that follows is the tail of a *different*
 * function (an hpte-invalidate routine: hpte_v, want_v, bpsize, apsize
 * and local are all undeclared here, and vpn/flags are read
 * uninitialized).  Left byte-identical; restore both functions from the
 * upstream source before attempting to build this file.
 */
/* * Remove a bolted kernel entry. Memory hotplug uses this. * * No need to lock here because we should be the only user.
*/ staticint native_hpte_removebolted(unsignedlong ea, int psize, int ssize)
{ unsignedlong vpn; unsignedlong vsid; long slot; struct hash_pte *hptep; unsignedlong flags;
/* NOTE(review): orphaned invalidate fragment begins here. */
if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
native_lock_hpte(hptep); /* recheck with locks held */
hpte_v = hpte_get_old_v(hptep);
if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { /* Invalidate the hpte. NOTE: this also unlocks it */
release_hpte_lock();
hptep->v = 0;
} else
native_unlock_hpte(hptep);
} /* * We need to invalidate the TLB always because hpte_remove doesn't do * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less * random entry from it. When we do that we don't invalidate the TLB * (hpte_remove) because we assume the old translation is still * technically "valid".
*/
tlbie(vpn, bpsize, apsize, ssize, local);
local_irq_restore(flags);
}
/*
 * NOTE(review): this region is garbled by the extraction.  The THP
 * variant of native_hugepage_invalidate() has lost the loop that walks
 * hpte_slot_array and derives want_v/hptep for each sub-page — the body
 * jumps straight from the declarations into the per-entry locking tail,
 * hpte_v/want_v/hptep are read uninitialized, and the braces are
 * unbalanced (an extra "}" before local_irq_restore()).  Only the #else
 * stub at the end is intact.  Left byte-identical; restore from the
 * upstream source before attempting to build this file.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE staticvoid native_hugepage_invalidate(unsignedlong vsid, unsignedlong addr, unsignedchar *hpte_slot_array, int psize, int ssize, int local)
{ int i; struct hash_pte *hptep; int actual_psize = MMU_PAGE_16M; unsignedint max_hpte_count, valid; unsignedlong flags, s_addr = addr; unsignedlong hpte_v, want_v, shift; unsignedlong hidx, vpn = 0, hash, slot;
/* NOTE(review): the hpte_slot_array iteration loop is missing here. */
/* Even if we miss, we need to invalidate the TLB */ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { /* recheck with locks held */
native_lock_hpte(hptep);
hpte_v = hpte_get_old_v(hptep);
if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { /* Invalidate the hpte. NOTE: this also unlocks it */
release_hpte_lock();
hptep->v = 0;
} else
native_unlock_hpte(hptep);
} /* * We need to do tlb invalidate for all the address, tlbie * instruction compares entry_VA in tlb with the VA specified * here
*/
tlbie(vpn, psize, actual_psize, ssize, local);
}
local_irq_restore(flags);
} #else staticvoid native_hugepage_invalidate(unsignedlong vsid, unsignedlong addr, unsignedchar *hpte_slot_array, int psize, int ssize, int local)
{
WARN(1, "%s called without THP support\n", __func__);
} #endif
staticvoid hpte_decode(struct hash_pte *hpte, unsignedlong slot, int *psize, int *apsize, int *ssize, unsignedlong *vpn)
{ unsignedlong avpn, pteg, vpi; unsignedlong hpte_v = be64_to_cpu(hpte->v); unsignedlong hpte_r = be64_to_cpu(hpte->r); unsignedlong vsid, seg_off; int size, a_size, shift; /* Look at the 8 bit LP value */ unsignedint lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
hpte_r = hpte_new_to_old_r(hpte_r);
} if (!(hpte_v & HPTE_V_LARGE)) {
size = MMU_PAGE_4K;
a_size = MMU_PAGE_4K;
} else {
size = hpte_page_sizes[lp] & 0xf;
a_size = hpte_page_sizes[lp] >> 4;
} /* This works for all page sizes, and for 256M and 1T segments */
*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
shift = mmu_psize_defs[size].shift;
switch (*ssize) { case MMU_SEGSIZE_256M: /* We only have 28 - 23 bits of seg_off in avpn */
seg_off = (avpn & 0x1f) << 23;
vsid = avpn >> 5; /* We can find more bits from the pteg value */ if (shift < 23) {
vpi = (vsid ^ pteg) & htab_hash_mask;
seg_off |= vpi << shift;
}
*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT; break; case MMU_SEGSIZE_1T: /* We only have 40 - 23 bits of seg_off in avpn */
seg_off = (avpn & 0x1ffff) << 23;
vsid = avpn >> 17; if (shift < 23) {
vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
seg_off |= vpi << shift;
}
*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT; break; default:
*vpn = size = 0;
}
*psize = size;
*apsize = a_size;
}
/*
 * NOTE(review): this region is garbled by the extraction.
 * native_hpte_clear() below is intact through its slot-walking loop, but
 * its closing brace is missing: from "if (lock_tlbie)" onward the code
 * is the tail of a *different* function (a batched flush-hash-range
 * routine: lock_tlbie, i, number, batch, pte, psize, ssize and index
 * are all undeclared here).  Left byte-identical; restore both
 * functions from the upstream source before attempting to build this
 * file.
 */
/* * clear all mappings on kexec. All cpus are in real mode (or they will * be when they isi), and we are the only one left. We rely on our kernel * mapping being 0xC0's and the hardware ignoring those two real bits. * * This must be called with interrupts disabled. * * Taking the native_tlbie_lock is unsafe here due to the possibility of * lockdep being on. On pre POWER5 hardware, not taking the lock could * cause deadlock. POWER5 and newer not taking the lock is fine. This only * gets called during boot before secondary CPUs have come up and during * crashdump and all bets are off anyway. * * TODO: add batching support when enabled. remember, no dynamic memory here, * although there is the control page available...
*/ static notrace void native_hpte_clear(void)
{ unsignedlong vpn = 0; unsignedlong slot, slots; struct hash_pte *hptep = htab_address; unsignedlong hpte_v; unsignedlong pteg_count; int psize, apsize, ssize;
pteg_count = htab_hash_mask + 1;
slots = pteg_count * HPTES_PER_GROUP;
for (slot = 0; slot < slots; slot++, hptep++) { /* * we could lock the pte here, but we are the only cpu * running, right? and for crash dump, we probably * don't want to wait for a maybe bad cpu.
*/
hpte_v = be64_to_cpu(hptep->v);
/* * Call __tlbie() here rather than tlbie() since we can't take the * native_tlbie_lock.
*/ if (hpte_v & HPTE_V_VALID) {
hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
hptep->v = 0;
___tlbie(vpn, psize, apsize, ssize);
}
}
/* NOTE(review): orphaned flush-hash-range fragment begins here. */
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
asmvolatile("ptesync":::"memory"); for (i = 0; i < number; i++) {
vpn = batch->vpn[i];
pte = batch->pte[i];
pte_iterate_hashed_subpages(pte, psize,
vpn, index, shift) {
__tlbie(vpn, psize, psize, ssize);
} pte_iterate_hashed_end();
} /* * Just do one more with the last used values.
*/
fixup_tlbie_vpn(vpn, psize, psize, ssize); asmvolatile("eieio; tlbsync; ptesync":::"memory");
if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.