/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means..
 */

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
/* Physical address of the page a pmd entry refers to. */
#define pmd_phys(pmd)	virt_to_phys((void *)pmd_val(pmd))
/*
 * NOTE(review): truncated fragment — the enclosing function's signature and
 * leading statements were lost in extraction.  From the pte_low/pte_high
 * fields and the buddy-global propagation, this appears to be the 32-bit-PTE
 * variant of set_pte(); TODO confirm against the original source and restore
 * the missing lines.
 */
#ifdef CONFIG_XPA if (pte.pte_high & _PAGE_GLOBAL) { #else if (pte.pte_low & _PAGE_GLOBAL) { #endif
pte_t *buddy = ptep_buddy(ptep); /* * Make sure the buddy is global too (if it's !none, * it better already be global)
*/ if (pte_none(*buddy)) { if (!IS_ENABLED(CONFIG_XPA))
buddy->pte_low |= _PAGE_GLOBAL;
buddy->pte_high |= _PAGE_GLOBAL;
}
}
}
/*
 * NOTE(review): truncated fragment — the enclosing function's signature and
 * the declaration of `null` were lost in extraction.  It builds a null pte
 * that preserves the buddy's _PAGE_GLOBAL bit, which matches the 32-bit-PTE
 * variant of pte_clear(); TODO confirm against the original source and
 * restore the missing lines.
 */
htw_stop(); /* Preserve global status for the pair */ if (IS_ENABLED(CONFIG_XPA)) { if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
null.pte_high = _PAGE_GLOBAL;
} else { if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
null.pte_low = null.pte_high = _PAGE_GLOBAL;
}
/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 *
 * Stores @pteval into *@ptep.  On TLBs that refill entries in pairs
 * (everything except the R3K-style TLB), a global PTE must have a global
 * buddy, so the buddy slot is upgraded atomically: the cmpxchg only
 * succeeds when the buddy is still none (0), which is exactly the case
 * the comment below describes — a non-none buddy must already be global.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		/* 64-bit PTEs need the 64-bit cmpxchg variant. */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}
staticinlinevoid pte_clear(struct mm_struct *mm, unsignedlong addr, pte_t *ptep)
{
htw_stop(); #if !defined(CONFIG_CPU_R3K_TLB) /* Preserve global status for the pair */ if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
set_pte(ptep, __pte(_PAGE_GLOBAL)); else #endif
set_pte(ptep, __pte(0));
htw_start();
} #endif
/*
 * NOTE(review): truncated fragment — `i`, `nr`, `pte`, `ptep` and `do_sync`
 * are declared in an enclosing function whose lines were lost in extraction.
 * The loop appears to decide whether a cache sync is needed when installing
 * a range of ptes (skipping slots whose present pfn is unchanged); TODO
 * confirm against the original source and restore the missing context.
 */
for (i = 0; i < nr; i++) { if (!pte_present(pte)) continue; if (pte_present(ptep[i]) &&
(pte_pfn(ptep[i]) == pte_pfn(pte))) continue;
do_sync = true;
}
/*
 * (pmds are folded into puds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif
/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
/* The kernel's master page directory. */
extern pgd_t swapper_pg_dir[];
/* * Platform specific pte_special() and pte_mkspecial() definitions * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
*/ #ifdefined(CONFIG_ARCH_HAS_PTE_SPECIAL) #ifdefined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) staticinlineint pte_special(pte_t pte)
{ return pte.pte_low & _PAGE_SPECIAL;
}
/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
/* Self-reference lets generic code detect the arch override via #ifdef. */
#define pgprot_noncached pgprot_noncached
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Install @entry at @ptep if it differs from the current pte.
 * Always reports a change so the caller flushes; see the comment
 * below on update_mmu_cache handling the spurious-fault case.
 */
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte(ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}
/* * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a * different prototype.
*/ #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR staticinline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsignedlong address, pmd_t *pmdp)
{
pmd_t old = *pmdp;
/*
 * We provide our own get_unmapped area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */
Messung V0.5
¤ Dauer der Verarbeitung: 0.12 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.