/*
 * Mask of all large folio orders supported for anonymous THP; all orders up to
 * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
 * (which is a limitation of the THP implementation).
 */
#define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))

/*
 * Mask of all large folio orders supported for file THP. Folios in a DAX
 * file are never split and the MAX_PAGECACHE_ORDER limit does not apply to
 * them. The same holds for PFNMAPs, where there is neither a struct page
 * nor a pagecache.
 */
#define THP_ORDERS_ALL_SPECIAL \
	(BIT(PMD_ORDER) | BIT(PUD_ORDER))
#define THP_ORDERS_ALL_FILE_DEFAULT \
	((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))

/*
 * Mask of all large folio orders supported for THP.
 */
#define THP_ORDERS_ALL \
	(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)
/* Flags controlling how thp_vma_allowable_orders() evaluates a VMA. */
#define TVA_SMAPS		(1 << 0)	/* Will be used for procfs */
#define TVA_IN_PF		(1 << 1)	/* Page fault handler */
#define TVA_ENFORCE_SYSFS	(1 << 2)	/* Obey sysfs configuration */
/* * Do the below checks: * - For file vma, check if the linear page offset of vma is * order-aligned within the file. The hugepage is * guaranteed to be order-aligned within the file, but we must * check that the order-aligned addresses in the VMA map to * order-aligned offsets within the file, else the hugepage will * not be mappable. * - For all vmas, check if the haddr is in an aligned hugepage * area.
*/ staticinlinebool thp_vma_suitable_order(struct vm_area_struct *vma, unsignedlong addr, int order)
{ unsignedlong hpage_size = PAGE_SIZE << order; unsignedlong haddr;
/* Don't have to check pgoff for anonymous vma */ if (!vma_is_anonymous(vma)) { if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
hpage_size >> PAGE_SHIFT)) returnfalse;
}
/*
 * Filter the bitfield of input orders to the ones suitable for use in the vma.
 * See thp_vma_suitable_order().
 * All orders that pass the checks are returned as a bitfield.
 */
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	int order;

	/*
	 * Iterate over orders, highest to lowest, removing orders that don't
	 * meet alignment requirements from the set. Exit loop at first order
	 * that meets requirements, since all lower orders must also meet
	 * requirements.
	 */
	order = highest_order(orders);
	while (orders) {
		if (thp_vma_suitable_order(vma, addr, order))
			break;
		order = next_order(&orders, order);
	}

	/*
	 * Return the surviving set. (The truncated version omitted this
	 * and fell off the end of a value-returning function — UB.)
	 */
	return orders;
}
/** * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma * @vma: the vm area to check * @vm_flags: use these vm_flags instead of vma->vm_flags * @tva_flags: Which TVA flags to honour * @orders: bitfield of all orders to consider * * Calculates the intersection of the requested hugepage orders and the allowed * hugepage orders for the provided vma. Permitted orders are encoded as a set * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3 * corresponds to order-3, etc). Order-0 is never considered a hugepage order. * * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage * orders are allowed.
*/ staticinline unsignedlong thp_vma_allowable_orders(struct vm_area_struct *vma,
vm_flags_t vm_flags, unsignedlong tva_flags, unsignedlong orders)
{ /* Optimization to check if required orders are enabled early. */ if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) { unsignedlong mask = READ_ONCE(huge_anon_orders_always);
if (vm_flags & VM_HUGEPAGE)
mask |= READ_ONCE(huge_anon_orders_madvise); if (hugepage_global_always() ||
((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
mask |= READ_ONCE(huge_anon_orders_inherit);
staticinlinebool vma_thp_disabled(struct vm_area_struct *vma,
vm_flags_t vm_flags)
{ /* * Explicitly disabled through madvise or prctl, or some * architectures may disable THP for some mappings, for * example, s390 kvm.
*/ return (vm_flags & VM_NOHUGEPAGE) ||
test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags);
}
staticinlinebool thp_disabled_by_hw(void)
{ /* If the hardware/firmware marked hugepage support disabled. */ return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED);
}
bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins); int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, unsignedint new_order); int min_order_for_split(struct folio *folio); int split_folio_to_list(struct folio *folio, struct list_head *list); bool uniform_split_supported(struct folio *folio, unsignedint new_order, bool warns); bool non_uniform_split_supported(struct folio *folio, unsignedint new_order, bool warns); int folio_split(struct folio *folio, unsignedint new_order, struct page *page, struct list_head *list); /* * try_folio_split_to_order - try to split a @folio at @page to @new_order using * non uniform split. * @folio: folio to be split * @page: split to @new_order at the given page * @new_order: the target split order * * Try to split a @folio at @page using non uniform split to @new_order, if * non uniform split is not supported, fall back to uniform split. After-split * folios are put back to LRU list. Use min_order_for_split() to get the lower * bound of @new_order. * * Return: 0: split is successful, otherwise split failed.
*/ staticinlineint try_folio_split_to_order(struct folio *folio, struct page *page, unsignedint new_order)
{ if (!non_uniform_split_supported(folio, new_order, /* warns= */ false)) return split_huge_page_to_list_to_order(&folio->page, NULL,
new_order); return folio_split(folio, new_order, page, NULL);
} staticinlineint split_huge_page(struct page *page)
{ return split_huge_page_to_list_to_order(page, NULL, 0);
} void deferred_split_folio(struct folio *folio, bool partially_mapped);
/* mmap_lock must be held on entry */ staticinline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{ if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd)) return __pmd_trans_huge_lock(pmd, vma); else return NULL;
} staticinline spinlock_t *pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
{ if (pud_trans_huge(*pud)) return __pud_trans_huge_lock(pud, vma); else return NULL;
}
/** * folio_test_pmd_mappable - Can we map this folio with a PMD? * @folio: The folio to test
*/ staticinlinebool folio_test_pmd_mappable(struct folio *folio)
{ return folio_order(folio) >= HPAGE_PMD_ORDER;
}
/*
 * NOTE(review): the German text below is website-extraction residue, not part
 * of this header; it is wrapped in a comment so the file stays compilable.
 * Translation: "The information on this website has been carefully compiled
 * to the best of our knowledge. However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */