/**
 * __ffs - find first set bit in a long word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
/*
 * When @word is a compile-time constant, __builtin_ctzl() folds to a
 * constant; otherwise fall back to the out-of-line variable__ffs() helper.
 * Fix: the cast type was garbled as "(unsignedlong)".
 */
#define __ffs(word)				\
	(__builtin_constant_p(word) ?		\
	 (unsigned long)__builtin_ctzl(word) :	\
	 variable__ffs(word))
/**
 * __fls - find last set bit in a long word
 * @word: the word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
/*
 * When @word is a compile-time constant, __builtin_clzl() folds to a
 * constant; otherwise fall back to the out-of-line variable__fls() helper.
 * Fix: the cast type was garbled as "(unsignedlong)".
 */
#define __fls(word)							\
	(__builtin_constant_p(word) ?					\
	 (unsigned long)(BITS_PER_LONG - 1 - __builtin_clzl(word)) :	\
	 variable__fls(word))
/**
 * ffs - find first set bit in a word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs routines.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first set bit if
 * value is nonzero. The first (least significant) bit is at position 1.
 */
/*
 * Constant inputs resolve to the compiler builtin (foldable at compile time);
 * non-constant inputs dispatch to the out-of-line variable_ffs() helper.
 */
#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x))
/**
 * fls - find last set bit in a word
 * @x: the word to search
 *
 * This is defined in a similar way as ffs, but returns the position of the most
 * significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last set bit if
 * value is nonzero. The last (most significant) bit is at position 32.
 */
/*
 * Statement expression so @x is evaluated exactly once (into x_); constant
 * inputs can then fold through __builtin_clz(), non-constant inputs go to
 * the out-of-line variable_fls() helper.
 */
#define fls(x) \
({ \
typeof(x) x_ = (x); \
__builtin_constant_p(x_) ? \
((x_ != 0) ? (32 - __builtin_clz(x_)) : 0) \
: \
variable_fls(x_); \
})
/** * arch_test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * * This is an atomic fully-ordered operation (implied full memory barrier).
*/ static __always_inline int arch_test_and_set_bit(int nr, volatileunsignedlong *addr)
{ return __test_and_op_bit(or, __NOP, nr, addr);
}
/** * arch_test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * * This is an atomic fully-ordered operation (implied full memory barrier).
*/ static __always_inline int arch_test_and_clear_bit(int nr, volatileunsignedlong *addr)
{ return __test_and_op_bit(and, __NOT, nr, addr);
}
/** * arch_test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from * * This operation is atomic and cannot be reordered. * It also implies a memory barrier.
*/ static __always_inline int arch_test_and_change_bit(int nr, volatileunsignedlong *addr)
{ return __test_and_op_bit(xor, __NOP, nr, addr);
}
/** * arch_set_bit - Atomically set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * * Note: there are no guarantees that this function will not be reordered * on non x86 architectures, so if you are writing portable code, * make sure not to rely on its reordering guarantees. * * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity.
*/ static __always_inline void arch_set_bit(int nr, volatileunsignedlong *addr)
{
__op_bit(or, __NOP, nr, addr);
}
/** * arch_clear_bit - Clears a bit in memory * @nr: Bit to clear * @addr: Address to start counting from * * Note: there are no guarantees that this function will not be reordered * on non x86 architectures, so if you are writing portable code, * make sure not to rely on its reordering guarantees.
*/ static __always_inline void arch_clear_bit(int nr, volatileunsignedlong *addr)
{
__op_bit(and, __NOT, nr, addr);
}
/** * arch_change_bit - Toggle a bit in memory * @nr: Bit to change * @addr: Address to start counting from * * change_bit() may be reordered on other architectures than x86. * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity.
*/ static __always_inline void arch_change_bit(int nr, volatileunsignedlong *addr)
{
__op_bit(xor, __NOP, nr, addr);
}
/** * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock * @nr: Bit to set * @addr: Address to count from * * This operation is atomic and provides acquire barrier semantics. * It can be used to implement bit locks.
*/ static __always_inline int arch_test_and_set_bit_lock( unsignedlong nr, volatileunsignedlong *addr)
{ return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
}
/** * arch_clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * * This operation is atomic and provides release barrier semantics.
*/ static __always_inline void arch_clear_bit_unlock( unsignedlong nr, volatileunsignedlong *addr)
{
__op_bit_ord(and, __NOT, nr, addr, .rl);
}
/**
 * arch___clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This operation is like clear_bit_unlock, however it is not atomic.
 * It does provide release barrier semantics so it can be used to unlock
 * a bit lock, however it would only be used if no other CPU can modify
 * any bits in the memory until the lock is released (a good example is
 * if the bit lock itself protects access to the other bits in the word).
 *
 * On RISC-V systems there seems to be no benefit to taking advantage of the
 * non-atomic property here: it's a lot more instructions and we still have to
 * provide release semantics anyway.
 */
static __always_inline void arch___clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	arch_clear_bit_unlock(nr, addr);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.