/*
 * Copy user data from/to a page which is mapped into a different
 * processes address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	/*
	 * Pin this task to the current CPU: the cache maintenance in
	 * flush_ptrace_access() must run on the CPU that wrote the data.
	 */
	preempt_disable();
#endif
	memcpy(dst, src, len);
	/* Make the new contents visible to the user mapping / I-cache. */
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
/*
 * Write back the kernel-side D-cache lines for every page of @folio so
 * the physical page is coherent with the kernel mapping.  For an
 * aliasing VIPT cache, additionally flush one userspace-coloured alias
 * when the folio belongs to a page cache mapping.
 */
void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernels mapping.
	 */
	if (!folio_test_highmem(folio)) {
		/* Lowmem: permanently mapped, flush the whole folio at once. */
		__cpuc_flush_dcache_area(folio_address(folio),
					 folio_size(folio));
	} else {
		unsigned long i;

		if (cache_is_vipt_nonaliasing()) {
			/* Highmem, non-aliasing: temporary per-page mappings. */
			for (i = 0; i < folio_nr_pages(folio); i++) {
				void *addr = kmap_local_folio(folio,
							      i * PAGE_SIZE);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_local(addr);
			}
		} else {
			/*
			 * Aliasing cache: only flush pages that already have
			 * a kmap_high mapping (kmap_high_get() returns NULL
			 * otherwise, and takes a reference when it succeeds).
			 */
			for (i = 0; i < folio_nr_pages(folio); i++) {
				void *addr = kmap_high_get(folio_page(folio, i));

				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(folio_page(folio, i));
				}
			}
		}
	}

	/*
	 * If this is a page cache folio, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with folio->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
}
/* * There are possible user space mappings of this page: * - VIVT cache: we need to also write back and invalidate all user * data in the current VM view associated with this page. * - aliasing VIPT: we only need to find one mapping of this page.
 */
/*
 * NOTE(review): the function header(s) for the statements below were lost
 * in extraction.  The pgoff/pgoff_end lines look like the start of
 * __flush_dcache_aliases(), while everything from the pte_exec() test
 * onward matches the body of __sync_icache_dcache(pte_t pteval) in
 * arch/arm/mm/flush.c — TODO restore from the upstream kernel source
 * rather than guessing the signatures here.
 */
pgoff = folio->index;
pgoff_end = pgoff + folio_nr_pages(folio) - 1;
/* Non-aliasing VIPT D-caches need no flushing for non-exec mappings. */
if (cache_is_vipt_nonaliasing() && !pte_exec(pteval)) /* only flush non-aliasing VIPT caches for exec mappings */ return;
/* Nothing to do for invalid or reserved pfns. */
pfn = pte_pfn(pteval); if (!pfn_valid(pfn)) return;
folio = page_folio(pfn_to_page(pfn)); if (folio_test_reserved(folio)) return;
/* Only aliasing VIPT caches care about the page cache mapping colour. */
if (cache_is_vipt_aliasing())
mapping = folio_flush_mapping(folio); else
mapping = NULL;
/* Flush the D-cache once per folio: PG_dcache_clean latches the state. */
if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
__flush_dcache_folio(mapping, folio);
/* Executable mapping: the I-cache must also see the new contents. */
if (pte_exec(pteval))
__flush_icache_all();
} #endif
/* * Ensure cache coherency between kernel mapping and userspace mapping * of this page. * * We have three cases to consider: * - VIPT non-aliasing cache: fully coherent so nothing required. * - VIVT: fully aliasing, so we need to handle every alias in our * current VM view. * - VIPT aliasing: need to handle one alias in our current VM view. * * If we need to handle aliasing: * If the page only exists in the page cache and there are no user * space mappings, we can be lazy and remember that we may have dirty * kernel cache lines for later. Otherwise, we assume we have * aliasing mappings. * * Note that we disable the lazy flush for SMP configurations where * the cache maintenance operations are not automatically broadcasted.
 */ void flush_dcache_folio(struct folio *folio)
{ struct address_space *mapping;
/* * The zero page is never written to, so never has any dirty * cache lines, and therefore never needs to be flushed.
 */ if (is_zero_pfn(folio_pfn(folio))) return;
/* Non-aliasing + broadcasting caches: just clear the lazy-flush flag. */
if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) { if (test_bit(PG_dcache_clean, &folio->flags))
clear_bit(PG_dcache_clean, &folio->flags); return;
}
/*
 * NOTE(review): this function is truncated — the closing brace and the
 * aliasing-cache path (mapping lookup, __flush_dcache_folio() call,
 * __flush_dcache_aliases(), __flush_icache_all()) present in upstream
 * arch/arm/mm/flush.c are missing from this extraction.  TODO restore
 * the remainder from the kernel source before compiling.
 */
/*
 * Legacy page-based entry point: delegates to flush_dcache_folio() for
 * the folio containing @page.
 */
void flush_dcache_page(struct page *page)
{
flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page); /* * Flush an anonymous page so that users of get_user_pages() * can safely access the data. The expected sequence is: * * get_user_pages() * -> flush_anon_page * memcpy() to/from page * if written to page, flush_dcache_page()
*/ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsignedlong vmaddr); void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsignedlong vmaddr)
{ unsignedlong pfn;
/* VIPT non-aliasing caches need do nothing */ if (cache_is_vipt_nonaliasing()) return;
/* * Write back and invalidate userspace mapping.
*/
pfn = page_to_pfn(page); if (cache_is_vivt()) {
flush_cache_page(vma, vmaddr, pfn);
} else { /* * For aliasing VIPT, we can flush an alias of the * userspace address only.
*/
flush_pfn_alias(pfn, vmaddr);
__flush_icache_all();
}
/* * Invalidate kernel mapping. No data should be contained * in this mapping of the page. FIXME: this is overkill * since we actually ask for a write-back and invalidate.
*/
__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.1 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.