// SPDX-License-Identifier: GPL-2.0-only /* * PowerPC version derived from arch/arm/mm/consistent.c * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) * * Copyright (C) 2000 Russell King
*/
/*
 * Make a memory area cache-consistent for a DMA transfer in the
 * given direction.
 */
static void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long addr = (unsigned long)vaddr;
	unsigned long limit = addr + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * Invalidate only when cache-line aligned, otherwise there
		 * is the potential for discarding uncommitted data from
		 * the cache.
		 */
		if (((addr | limit) & (L1_CACHE_BYTES - 1)) == 0)
			invalidate_dcache_range(addr, limit);
		else
			flush_dcache_range(addr, limit);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(addr, limit);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_range(addr, limit);
		break;
	}
}
#ifdef CONFIG_HIGHMEM /* * __dma_sync_page() implementation for systems using highmem. * In this case, each page of a buffer must be kmapped/kunmapped * in order to have a virtual address for __dma_sync(). This must * not sleep so kmap_atomic()/kunmap_atomic() are used. * * Note: yes, it is possible and correct to have a buffer extend * beyond the first page.
*/ staticinlinevoid __dma_sync_page_highmem(struct page *page, unsignedlong offset, size_t size, int direction)
{
size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
size_t cur_size = seg_size; unsignedlong flags, start, seg_offset = offset; int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE; int seg_nr = 0;
local_irq_save(flags);
do {
start = (unsignedlong)kmap_atomic(page + seg_nr) + seg_offset;
The information on this web page was compiled carefully and
to the best of our knowledge. However, no guarantee is given as to the completeness, correctness,
or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.