/* * Flush all TLB entries on the local CPU.
*/ staticinlinevoid __tlb_flush_local(void)
{ asmvolatile("ptlb" : : : "memory");
}
/* * Flush TLB entries for a specific ASCE on all CPUs
*/ staticinlinevoid __tlb_flush_idte(unsignedlong asce)
{ unsignedlong opt;
opt = IDTE_PTOA; if (machine_has_tlb_guest())
opt |= IDTE_GUEST_ASCE; /* Global TLB flush for the mm */ asmvolatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
}
/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

	/*
	 * csp() (Compare and Swap and Purge) on a dummy word purges the
	 * TLBs of all CPUs in the configuration -- the compare/swap target
	 * itself is irrelevant here.
	 */
	csp(&dummy, 0, 0);
}
/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
 * this implicates multiple ASCEs!).
 *
 * NOTE(review): the body of this function is truncated in this chunk --
 * only the gmap_asce declaration is visible and no closing brace follows.
 * Code left byte-identical; verify against the full source file.
 */ staticinlinevoid __tlb_flush_mm(struct mm_struct *mm)
{ unsignedlong gmap_asce; /* presumably the gmap (guest mapping) ASCE -- TODO confirm */
/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLB's
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
/* These variants are intentional no-ops on this architecture. */
#define flush_tlb()			do { } while (0)
#define flush_tlb_all()			do { } while (0)
#define flush_tlb_page(vma, addr)	do { } while (0)
/*
 * NOTE(review): the following German text is website-disclaimer boilerplate
 * ("the information on this website was compiled to the best of our
 * knowledge; no guarantee of completeness, correctness, or quality is
 * given; the syntax highlighting and measurement are still experimental")
 * that appears to be extraction residue, not source code. Preserved as a
 * comment so the file is not broken by raw prose:
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfaeltig zusammengestellt. Es wird jedoch weder Vollstaendigkeit,
 * noch Richtigkeit, noch Qualitaet der bereitgestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */