/*
 * TLB flush actions. Used as argument to tlbiel_all().
 */
enum {
	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
};
/*
 * Invalidate the entire TLB on this CPU, for whichever MMU mode
 * (radix or hash) was detected at boot.
 *
 * This is used for host machine check and bootup.
 *
 * This uses early_radix_enabled and implementations use
 * early_cpu_has_feature etc because that works early in boot
 * and this is the machine check path which is not performance
 * critical.
 */
static inline void tlbiel_all(void)
{
	if (early_radix_enabled())
		radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}
/*
 * Invalidate all TLB entries for the current LPID on this CPU.
 *
 * This is used for guest machine check.
 *
 * @radix: true if the guest is using the radix MMU, false for hash.
 */
static inline void tlbiel_all_lpid(bool radix)
{
	if (radix)
		radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}
#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
/*
 * Deliberately a no-op: Book3S 64 never needs a flush on a spurious fault.
 */
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *ptep)
{
	/*
	 * Book3S 64 does not require spurious fault flushes because the PTE
	 * must be re-fetched in case of an access permission problem. So the
	 * only reason for a spurious fault should be concurrent modification
	 * to the PTE, in which case the PTE will eventually be re-fetched by
	 * the MMU when it attempts the access again.
	 *
	 * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
	 * Entry, Setting a Reference or Change Bit or Upgrading Access
	 * Authority (PTE Subject to Atomic Hardware Updates):
	 *
	 * "If the only change being made to a valid PTE that is subject to
	 * atomic hardware updates is to set the Reference or Change bit to
	 * 1 or to upgrade access authority, a simpler sequence suffices
	 * because the translation hardware will refetch the PTE if an
	 * access is attempted for which the only problems were reference
	 * and/or change bits needing to be set or insufficient access
	 * authority."
	 *
	 * The nest MMU in POWER9 does not perform this PTE re-fetch, but
	 * it avoids the spurious fault problem by flushing the TLB before
	 * upgrading PTE permissions, see radix__ptep_set_access_flags.
	 */
}
/* * The return value of this function doesn't matter for hash, * ptep_modify_prot_start() does a pte_update() which does or schedules * any necessary hash table update and flush.
*/ if (!radix_enabled()) returntrue;
/* * We do not expect kernel mappings or non-PTEs or not-present PTEs.
*/
VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));
/* * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED. * * In theory, some changed software bits could be tolerated, in * practice those should rarely if ever matter.
*/
if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED)) returntrue;
/* * If any of the above was present in old but cleared in new, flush. * With the exception of _PAGE_ACCESSED, don't worry about flushing * if that was cleared (see the comment in ptep_clear_flush_young()).
*/ if ((delta & ~_PAGE_ACCESSED) & oldval) returntrue;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.