/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/arch/arm/mm/proc-arm922.S: MMU functions for ARM922
 *
 *  Copyright (C) 1999,2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2001 Altera Corporation
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 *  These are the low level assembler for performing cache and TLB
 *  functions on the arm922.
 *
 *  CONFIG_CPU_ARM922_CPU_IDLE -> nohlt
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE 32
/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS 4
/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES 64
/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.  (I think this should
 * be 32768).
 */
#define CACHE_DLIMIT 8192
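
/*
 * Rough arithmetic behind the 8192 figure: a whole-cache clean costs
 * CACHE_DSEGMENTS * CACHE_DENTRIES = 4 * 64 = 256 index operations,
 * while cleaning a range line by line costs one operation per
 * CACHE_DLINESIZE bytes, i.e. 8192 / 32 = 256 operations at this
 * limit, so 8K is roughly the break-even point between the two
 * methods.
 */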

	.text
/*
 * cpu_arm922_proc_init()
 */
SYM_TYPED_FUNC_START(cpu_arm922_proc_init)
ret lr
SYM_FUNC_END(cpu_arm922_proc_init)

/*
 * cpu_arm922_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
.align 5
.pushsection .idmap.text, "ax"
SYM_TYPED_FUNC_START(cpu_arm922_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
SYM_FUNC_END(cpu_arm922_reset)
.popsection

/*
 * cpu_arm922_do_idle()
 */
.align 5
SYM_TYPED_FUNC_START(cpu_arm922_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
SYM_FUNC_END(cpu_arm922_do_idle)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(arm922_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
SYM_FUNC_END(arm922_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
SYM_FUNC_ALIAS(arm922_flush_user_cache_all, arm922_flush_kern_cache_all)
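
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.  This is the target of the
 * arm922_flush_user_cache_all alias above: it walks every cache line
 * by set/way index, using the CACHE_DSEGMENTS/CACHE_DENTRIES geometry
 * defined at the top of this file.
 */
SYM_TYPED_FUNC_START(arm922_flush_kern_cache_all)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5		@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries per segment
2:	mcr	p15, 0, r3, c7, c14, 2			@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b					@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b					@ segments 3 to 0
	mcr	p15, 0, ip, c7, c5, 0			@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4			@ drain WB
	ret	lr
SYM_FUNC_END(arm922_flush_kern_cache_all)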

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
SYM_TYPED_FUNC_START(arm922_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
b arm922_coherent_user_range
#endif
SYM_FUNC_END(arm922_coherent_kern_range)

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
SYM_TYPED_FUNC_START(arm922_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
SYM_FUNC_END(arm922_coherent_user_range)

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr	- kernel address
 * - size	- region size
 */
SYM_TYPED_FUNC_START(arm922_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
SYM_FUNC_END(arm922_flush_kern_dcache_area)

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm922_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
#endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */

/*
 * cpu_arm922_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
.align 5
SYM_TYPED_FUNC_START(cpu_arm922_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5		@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries per segment
2:	mcr	p15, 0, r3, c7, c14, 2			@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b					@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b					@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0			@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4			@ drain WB
	mcr	p15, 0, r0, c2, c0, 0			@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0			@ invalidate I & D TLBs
#endif /* CONFIG_MMU */
	ret	lr
SYM_FUNC_END(cpu_arm922_switch_mm)