/* * The "classic" 32-bit implementation of the PowerPC MMU uses a hash * table containing PTEs, together with a set of 16 segment registers, * to define the virtual to physical address mapping. * * We use the hash table as an extended TLB, i.e. a cache of currently * active mappings. We maintain a two-level page table tree, much * like that used by the i386, for the sake of the Linux memory * management code. Low-level assembler code in hash_low_32.S * (procedure hash_page) is responsible for extracting ptes from the * tree and putting them into the hash table when necessary, and * updating the accessed and modified bits in the page table tree.
*/
#ifdef CONFIG_PTE_64BIT
/* We never clear the high word of the pte */
#define _PTE_NONE_MASK	(0xffffffff00000000ULL | _PAGE_HASHPTE)
#else
#define _PTE_NONE_MASK	_PAGE_HASHPTE
#endif
/* We borrow the _PAGE_READ bit to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	_PAGE_READ
/* And here we include common definitions */

/* PTE bits the hash-table management code tracks; here just _PAGE_HASHPTE. */
#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * as _PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't just pre-define the value so we don't override it here.
 */
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK			(~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS	36
#else
#define PTE_RPN_MASK			(~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS	32
#endif
/*
 * _PAGE_CHG_MASK masks of bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL)
/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table. The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/* Establish/remove a kernel virtual->physical mapping for @va. */
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif /* !__ASSEMBLY__ */
/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP
 */
/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif
/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leaves a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START). For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system. This really does become a problem for machines with good amounts
 * of RAM. -- Cort
 */
#define VMALLOC_OFFSET	(0x1000000) /* 16M */
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Bits in a linux-style PTE. These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */
/* Clear a PTE while preserving _PAGE_HASHPTE so the hash entry is still invalidated later. */
#define pte_clear(mm, addr, ptep) \
	do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)
/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);
/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);
/*
 * Flush an entry from the TLB/hash table.
 *
 * Only does anything when a hash table is in use (MMU_FTR_HPTE_TABLE);
 * it hands the page-aligned physical address of the PTE to the
 * assembler flush routine so the matching HPTE can be invalidated.
 *
 * NOTE(review): the body was truncated in this extraction; the
 * flush_hash_pages() call and closing braces below are restored from
 * the upstream implementation — confirm against the canonical file.
 */
static inline void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
				    unsigned long addr)
{
	if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;

		flush_hash_pages(mm->context.id, addr, ptephys, 1);
	}
}
/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * NOTE(review): this extraction is damaged — the tokens below are fused
 * ("staticinline", "unsignedlong") and the function body is truncated:
 * the atomic clear/set sequence (and the non-hash fallback) plus the
 * closing braces are missing. Restore the full implementation from the
 * upstream file rather than reconstructing the inline-asm here.
 */
 */ staticinline pte_basic_t pte_update(struct mm_struct *mm, unsignedlong addr, pte_t *p, unsignedlong clr, unsignedlong set, int huge)
{
pte_basic_t old;
if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) { unsignedlong tmp;
/* * 2.6 calls this without flushing the TLB entry; this is wrong * for our hash-based implementation, we fix that up here.
*/ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG staticinlineint __ptep_test_and_clear_young(struct mm_struct *mm, unsignedlong addr, pte_t *ptep)
{ unsignedlong old;
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); if (old & _PAGE_HASHPTE)
flush_hash_entry(mm, ptep, addr);
/* * We only find page table entry in the last level * Hence no need for other accessors
*/ #define pte_access_permitted pte_access_permitted staticinlinebool pte_access_permitted(pte_t pte, bool write)
{ /* * A read-only access is controlled by _PAGE_READ bit. * We have _PAGE_READ set for WRITE
*/ if (!pte_present(pte) || !pte_read(pte)) returnfalse;
if (write && !pte_write(pte)) returnfalse;
returntrue;
}
/* Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. * * Even if PTEs can be unsigned long long, a PFN is always an unsigned * long for now.
*/ staticinline pte_t pfn_pte(unsignedlong pfn, pgprot_t pgprot)
{ return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
pgprot_val(pgprot));
}
/*
 * This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors.
 *
 * First case is 32-bit in UP mode with 32-bit PTEs, we need to preserve
 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
 * and see we need to keep track that this PTE needs invalidating.
 *
 * Second case is 32-bit with 64-bit PTE. In this case, we
 * can just store as long as we do the two halves in the right order
 * with a barrier in between. This is possible because we take care,
 * in the hash code, to pre-invalidate if the PTE was already hashed,
 * which synchronizes us with any concurrent invalidation.
 * In the percpu case, we fallback to the simple update preserving
 * the hash bits (ie, same as the non-SMP case).
 *
 * Third case is 32-bit in SMP mode with 32-bit PTEs. We use the
 * helper pte_update() which does an atomic update. We need to do that
 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
 * per-CPU PTE such as a kmap_atomic, we also do a simple update preserving
 * the hash bits instead.
 *
 * NOTE(review): this extraction is damaged — tokens are fused
 * ("staticinlinevoid", "elseif") and the function is truncated below:
 * the 64-bit two-half store sequence, the third (SMP 32-bit) case and
 * the closing braces are missing. Restore from the upstream file.
 */
 */ staticinlinevoid __set_pte_at(struct mm_struct *mm, unsignedlong addr,
pte_t *ptep, pte_t pte, int percpu)
{ if ((!IS_ENABLED(CONFIG_SMP) && !IS_ENABLED(CONFIG_PTE_64BIT)) || percpu) {
*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) |
(pte_val(pte) & ~_PAGE_HASHPTE));
} elseif (IS_ENABLED(CONFIG_PTE_64BIT)) { if (pte_val(*ptep) & _PAGE_HASHPTE)
flush_hash_entry(mm, ptep, addr);
/*
 * NOTE(review): extraction residue — the following German website
 * disclaimer was accidentally captured here and is not part of this
 * header. Translation: "The information on this web page was compiled
 * carefully to the best of our knowledge. However, neither
 * completeness, nor correctness, nor quality of the information
 * provided is guaranteed. Note: the colored syntax rendering and the
 * measurement are still experimental." The remainder of __set_pte_at()
 * and of this file is missing from this chunk.
 */