/* SPDX-License-Identifier: GPL-2.0-only */ /* * arch/arm/include/asm/assembler.h * * Copyright (C) 1996-2000 Russell King * * This file contains arm architecture specific defines * for the different processors. * * Do not include any C declarations in this file - it is included by * assembler source.
*/ #ifndef __ASM_ASSEMBLER_H__ #define __ASM_ASSEMBLER_H__
/* Refuse inclusion from C translation units: this header holds only
 * assembler macros. */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif
/*
 * ARM_BE8() - emit its argument only for kernels configured to run in
 * BE8 (byte-invariant big-endian) mode; expands to nothing otherwise.
 */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code
#else
#define ARM_BE8(code...)
#endif
/*
 * PLD() - data-preload wrapper.  The preload hint only exists from
 * ARMv5 onwards, so the wrapped code is dropped on older architectures.
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
/*
 * CALGN() - enable code that cacheline-aligns the destination pointer
 * when bulk-writing to memory.
 *
 * Experiments on StrongARM and XScale showed no benefit unless the
 * cache is set to write-allocate (which would need further testing on
 * XScale when WA is used).  On Feroceon there is much to gain
 * regardless of cache mode, so only Feroceon turns this on.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...)	code
#else
#define CALGN(code...)
#endif
/* mask for the 12-bit immediate field of an ARM data-processing instruction */
#define IMM12_MASK 0xfff
/* the frame pointer used for stack unwinding */
@ ARM builds alias fpreg to r11, Thumb-2 builds to r7
ARM(	fpreg	.req	r11	)
THUMB(	fpreg	.req	r7	)
/* * Enable and disable interrupts
 */ #if __LINUX_ARM_ARCH__ >= 6
@ NOTE(review): the matching #else/#endif for this ARMv6+ guard (and,
@ presumably, the pre-v6 fallbacks and enable_irq_notrace) are not
@ visible in this chunk -- verify against the complete file.
	.macro	disable_irq_notrace
	cpsid	i		@ mask IRQs (CPSR.I := 1); no tracing hook emitted
	.endm
@ enable_irq: unmask IRQs, emitting the irq-trace hook first so the
@ tracer observes the transition.  Both asm_trace_hardirqs_on and
@ enable_irq_notrace are defined elsewhere (not visible in this chunk).
	.macro	enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm /* * Save the current IRQ state and disable IRQs. Note that this macro * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
@ save_and_disable_irqs: copy the current interrupt state into \oldcpsr
@ (PRIMASK on v7-M, the full CPSR otherwise), then mask IRQs.
	.macro	save_and_disable_irqs, oldcpsr #ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask #else
	mrs	\oldcpsr, cpsr #endif
	disable_irq		@ not visible in this chunk; presumably the traced variant of disable_irq_notrace
	.endm
/* * Restore interrupt state previously stored in a register. We don't * guarantee that this will preserve the flags.
 */
@ v7-M keeps its interrupt mask in PRIMASK; other cores get a write to
@ the CPSR control field only (cpsr_c) -- hence no flags guarantee.
	.macro	restore_irqs_notrace, oldcpsr #ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr #else
	msr	cpsr_c, \oldcpsr #endif
	.endm
/* * Assembly version of "adr rd, BSYM(sym)". This should only be used to * reference local symbols in the same assembly file which are to be * resolved by the assembler. Other usage is undefined.
 */
@ generate one badr<cc> macro per ARM condition-code suffix (plus bare badr)
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym #ifdef CONFIG_THUMB2_KERNEL
	@ +1 sets bit 0 so the resulting address is a valid Thumb target
	adr\c	\rd, \sym + 1 #else
	adr\c	\rd, \sym #endif
	.endm
	.endr
/* * Get current thread_info.
 */
	.macro	get_thread_info, rd /* thread_info is the first member of struct task_struct */
	@ so the task pointer returned by get_current (defined elsewhere)
	@ can be used directly as the thread_info pointer
	get_current	\rd
	.endm
/*
 * setmode - assert/force the given processor mode during boot.
 *
 * \mode is the PSR mode value to enter, \reg a scratch register the
 * macro may clobber.
 *
 * For v7-M this is done in __v7m_setup, so setmode is empty there.
 * Thumb-2 cannot encode "msr cpsr_c, #imm", so it stages the value in
 * \reg; classic ARM writes the immediate directly.
 *
 * BUGFIX: the original text read "#ifdefined(...)" and
 * "#elifdefined(...)", which are invalid preprocessor directives;
 * they must be "#if defined(...)" / "#elif defined(...)".
 */
#if defined(CONFIG_CPU_V7M)
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif
/* * Helper macro to enter SVC mode cleanly and mask interrupts. reg is * a scratch register for the macro to overwrite. * * This macro is intended for forcing the CPU into SVC mode at boot time. * you cannot return to the original mode.
 */
	.macro safe_svcmode_maskall reg:req #if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr		@ \reg = current CPSR
	eor	\reg, \reg, #HYP_MODE	@ mode field becomes 0 iff currently in HYP
	tst	\reg, #MODE_MASK	@ Z set => we were in HYP mode
	bic	\reg , \reg , #MODE_MASK	@ clear the mode bits
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE	@ mask IRQ+FIQ, select SVC
	THUMB(	orr	\reg , \reg , #PSR_T_BIT	)	@ keep executing in Thumb state
	bne	1f			@ not in HYP: a plain CPSR write suffices
	orr	\reg, \reg, #PSR_A_BIT	@ HYP exit: additionally mask async aborts
	badr	lr, 2f			@ return address for the exception return
	msr	spsr_cxsf, \reg		@ target PSR for the return from HYP
	__MSR_ELR_HYP(14)		@ ELR_hyp := lr (r14)
	__ERET				@ exception-return out of HYP into SVC at 2:
1:	msr	cpsr_c, \reg		@ non-HYP path: switch mode directly
2: #else /* * workaround for possibly broken pre-v6 hardware * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg #endif
	.endm
/* * STRT/LDRT access macros with ARM and Thumb-2 variants
 */ #ifdef CONFIG_THUMB2_KERNEL
@ NOTE(review): this region appears truncated by extraction -- the
@ opening ".macro" line (seemingly a PC-relative address helper such as
@ __adldst_l, judging by the \op/\reg/\tmp/\c parameters used below),
@ its ".endm", and the closing #endif are all missing, and the adjacent
@ #ifdef/#ifndef CONFIG_THUMB2_KERNEL guards directly contradict each
@ other.  Compare against the complete upstream file before relying on
@ any of the lines below.
#ifndef CONFIG_THUMB2_KERNEL
	.set	.Lpc\@, . + 8 // PC bias
	.ifc	\op, add
	add\c	\reg, \tmp, pc
	.else
	\op\c	\reg, [pc, \tmp]
	.endif #else
.Lb\@:	add\c	\tmp, \tmp, pc /* * In Thumb-2 builds, the PC bias depends on whether we are currently * emitting into a .arm or a .thumb section. The size of the add opcode * above will be 2 bytes when emitting in Thumb mode and 4 bytes when * emitting in ARM mode, so let's use this to account for the bias.
 */
	.set	.Lpc\@, . + (. - .Lb\@)
/*
 * mov_l - move a constant value or [relocated] address into a register
 *
 * On ARMv7+ a movw/movt pair materialises the 32-bit value directly;
 * older architectures lack those encodings and fall back to a
 * literal-pool load.  \cond is an optional condition-code suffix.
 */
	.macro	mov_l, dst:req, imm:req, cond
	.if	__LINUX_ARM_ARCH__ >= 7
	movw\cond	\dst, #:lower16:\imm
	movt\cond	\dst, #:upper16:\imm
	.else
	ldr\cond	\dst, =\imm
	.endif
	.endm
/* * adr_l - adr pseudo-op with unlimited range * * @dst: destination register * @sym: name of the symbol * @cond: conditional opcode suffix
 */
	.macro	adr_l, dst:req, sym:req, cond
	@ \dst doubles as the scratch register for the address computation
	__adldst_l	add, \dst, \sym, \dst, \cond
	.endm
/* * ldr_l - ldr <literal> pseudo-op with unlimited range * * @dst: destination register * @sym: name of the symbol * @cond: conditional opcode suffix
 */
	.macro	ldr_l, dst:req, sym:req, cond
	@ loads the word at \sym; \dst doubles as the address scratch
	__adldst_l	ldr, \dst, \sym, \dst, \cond
	.endm
/* * str_l - str <literal> pseudo-op with unlimited range * * @src: source register * @sym: name of the symbol * @tmp: mandatory scratch register * @cond: conditional opcode suffix
 */
	.macro	str_l, src:req, sym:req, tmp:req, cond
	@ a separate scratch (\tmp) is required here since \src must be preserved
	__adldst_l	str, \src, \sym, \tmp, \cond
	.endm
@ __ldst_va: emit "\op\cond \reg, [<address of \sym> + \offset]",
@ using \tmp as the address scratch register.  The fast path simply
@ materialises the address with mov_l.
	.macro __ldst_va, op, reg, tmp, sym, cond, offset #if __LINUX_ARM_ARCH__ >= 7 || \
	!defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
	(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
	mov_l	\tmp, \sym, \cond #else /* * Avoid a literal load, by emitting a sequence of ADD/LDR instructions * with the appropriate relocations. The combined sequence has a range * of -/+ 256 MiB, which should be sufficient for the core kernel and * for modules loaded into the module region.
 */
	.globl	\sym
	@ the three group relocations below rewrite the immediates of the
	@ tagged instructions at link time
	.reloc	.L0_\@, R_ARM_ALU_PC_G0_NC, \sym
	.reloc	.L1_\@, R_ARM_ALU_PC_G1_NC, \sym
	.reloc	.L2_\@, R_ARM_LDR_PC_G2, \sym
.L0_\@: sub\cond	\tmp, pc, #8 - \offset
.L1_\@: sub\cond	\tmp, \tmp, #4 - \offset
.L2_\@: #endif
	\op\cond	\reg, [\tmp, #\offset]
	.endm
/*
 * ldr_va - load a 32-bit word from the virtual address of \sym
 *
 * When no scratch register \tmp is supplied, the destination register
 * \rd itself is used to form the address.
 */
	.macro	ldr_va, rd:req, sym:req, cond, tmp, offset=0
	.ifb	\tmp
	__ldst_va	ldr, \rd, \rd, \sym, \cond, \offset
	.else
	__ldst_va	ldr, \rd, \tmp, \sym, \cond, \offset
	.endif
	.endm
/* * str_va - store a 32-bit word to the virtual address of \sym
 */
	.macro	str_va, rn:req, sym:req, tmp:req, cond
	@ unlike ldr_va, the scratch register is mandatory: \rn must survive
	__ldst_va	str, \rn, \tmp, \sym, \cond, 0
	.endm
/* * ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym', * without using a temp register. Supported in ARM mode * only.
 */
	.macro	ldr_this_cpu_armv6, rd:req, sym:req
	this_cpu_offset	\rd		@ defined elsewhere; presumably yields this CPU's per-CPU offset
	.globl	\sym
	@ the group relocations below rewrite the sub/ldr immediates at link
	@ time so the sequence computes &\sym + offset and loads through it;
	@ the #4/#0/#4 values are placeholders, not meaningful constants
	.reloc	.L0_\@, R_ARM_ALU_PC_G0_NC, \sym
	.reloc	.L1_\@, R_ARM_ALU_PC_G1_NC, \sym
	.reloc	.L2_\@, R_ARM_LDR_PC_G2, \sym
	add	\rd, \rd, pc
.L0_\@: sub	\rd, \rd, #4
.L1_\@: sub	\rd, \rd, #0
.L2_\@: ldr	\rd, [\rd, #4]
	.endm
/* * ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym' * into register 'rd', which may be the stack pointer, * using 't1' and 't2' as general temp registers. These * are permitted to overlap with 'rd' if != sp
 */
	.macro ldr_this_cpu, rd:req, sym:req, t1:req, t2:req #ifndef CONFIG_SMP
	@ UP: no per-CPU offset exists, so a plain load of the variable suffices
	ldr_va		\rd, \sym, tmp=\t1 #elif __LINUX_ARM_ARCH__ >= 7 || \
	!defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
	(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
	this_cpu_offset	\t1
	mov_l		\t2, \sym
	@ address is formed entirely in the temps because \rd may be sp
	ldr		\rd, [\t1, \t2] #else
	@ remaining case: pre-v7 SMP with group relocations available
	ldr_this_cpu_armv6 \rd, \sym #endif
	.endm
/*
 * NOTE(review): the German text below is website boilerplate that was
 * accidentally appended to this file during extraction; it is wrapped
 * in a comment (and translated) so the file still assembles.
 * Translation: "The information on this web page was compiled
 * carefully and to the best of our knowledge.  However, neither
 * completeness, nor correctness, nor quality of the information
 * provided is guaranteed.  Note: the coloured syntax rendering and
 * the measurement are still experimental."
 */