/* * Besides being unnecessary in the absence of SMT, this * check prevents trying to do lbarx/stbcx. on e5500 which * doesn't implement either feature.
*/ if (!cpu_has_feature(CPU_FTR_SMT)) return;
/* Just round-robin the entries and wrap when we hit the end */ if (unlikely(index == ncams - 1))
__this_cpu_write(next_tlbcam_idx, tlbcam_index); else
__this_cpu_inc(next_tlbcam_idx);
psize = vma_mmu_pagesize(vma);
shift = __ilog2(psize);
tsize = shift - 10; /* * We can't be interrupted while we're setting up the MAS * registers or after we've confirmed that no tlb exists.
*/
local_irq_save(flags);
book3e_tlb_lock();
if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
book3e_tlb_unlock();
local_irq_restore(flags); return;
}
/* We have to use the CAM(TLB1) on FSL parts for hugepages */
index = tlb1_next();
mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));
if (mmu_has_feature(MMU_FTR_BIG_PHYS))
mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
asmvolatile ("tlbwe");
book3e_tlb_unlock();
local_irq_restore(flags);
}
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 *
 * This must always be called with the pte lock held.
 */
void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	/*
	 * Only hugepage mappings are preloaded here (into the CAM/TLB1
	 * entries — see book3e_hugetlb_preload); other page sizes take
	 * no action in this hook.
	 */
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.