/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
 * Copyright (c) 2008-2018 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Some of the machine specific code was borrowed from our GC distribution.
 */
# if defined(__APPLE_CC__)
    /* OS X 10.7 clang-425 lacks __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n     */
    /* predefined macro (unlike e.g. OS X 10.11 clang-703).             */
#   define AO_GCC_FORCE_HAVE_CAS

#   ifdef __x86_64__
#     if !AO_CLANG_PREREQ(9, 0) /* < Apple clang-900 */
        /* Older Apple clang (e.g., clang-600 based on LLVM 3.5svn) had */
        /* some bug in the double word CAS implementation for x64.      */
#       define AO_SKIPATOMIC_double_compare_and_swap_ANY
#     endif

#   elif defined(__MACH__)
      /* OS X 10.8 lacks __atomic_load/store symbols for arch i386     */
      /* (even with a non-Apple clang).                                 */
#     ifndef MAC_OS_X_VERSION_MIN_REQUIRED
        /* Include this header just to import the version macro.       */
#       include <AvailabilityMacros.h>
#     endif
#     if MAC_OS_X_VERSION_MIN_REQUIRED < 1090 /* MAC_OS_X_VERSION_10_9 */
#       define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
#     endif
#   endif /* __i386__ */

# elif defined(__clang__)
#   if !defined(__x86_64__)
#     if !defined(AO_PREFER_BUILTIN_ATOMICS) && !defined(__CYGWIN__) \
         && !AO_CLANG_PREREQ(5, 0)
        /* At least clang-3.8/i686 (from NDK r11c) requires -latomic    */
        /* to be specified in case of double-word atomic operation use. */
#       define AO_SKIPATOMIC_double_compare_and_swap_ANY
#       define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
#     endif /* !AO_PREFER_BUILTIN_ATOMICS */

#   elif !defined(__ILP32__)
#     if (!AO_CLANG_PREREQ(3, 5) && !defined(AO_PREFER_BUILTIN_ATOMICS)) \
         || (!AO_CLANG_PREREQ(4, 0) && defined(AO_ADDRESS_SANITIZER)) \
         || defined(AO_THREAD_SANITIZER)
        /* clang-3.4/x64 required -latomic.  clang-3.9/x64 seems to     */
        /* pass double-wide arguments to atomic operations incorrectly  */
        /* in case of ASan/TSan.                                        */
        /* TODO: As of clang-4.0, lock-free test_stack fails if TSan.   */
#       define AO_SKIPATOMIC_double_compare_and_swap_ANY
#       define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
#     endif
#   endif /* __x86_64__ */

# elif AO_GNUC_PREREQ(7, 0) && !defined(AO_PREFER_BUILTIN_ATOMICS) \
       && !defined(AO_THREAD_SANITIZER) && !defined(__MINGW32__)
    /* gcc-7.x/x64 (gcc-7.2, at least) requires the -latomic flag in    */
    /* case of double-word atomic operations use (but not for TSan).    */
    /* TODO: Revise it for the future gcc-7 releases.                   */
#   define AO_SKIPATOMIC_double_compare_and_swap_ANY
#   define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# endif /* __GNUC__ && !__clang__ */
/* The following really assume we have a 486 or better.  Unfortunately  */
/* gcc doesn't define a suitable feature test macro based on command    */
/* line options.                                                        */
/* We should perhaps test dynamically.                                  */
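/* For illustration only (a hedged sketch, not used by this file): one  */
/* way such a dynamic test could look with GCC's <cpuid.h>; the helper  */
/* name AO_have_sse2_example is hypothetical.                           */
#if 0
# include <cpuid.h>
  static int
  AO_have_sse2_example(void)
  {
    unsigned eax, ebx, ecx, edx;

    /* Leaf 1, EDX bit 26 reports SSE2 (and thus "mfence") support.     */
    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
      return 0; /* cpuid leaf 1 unsupported */
    return (edx & bit_SSE2) != 0;
  }
#endif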
#include"../all_aligned_atomic_load_store.h"
#include"../test_and_set_t_is_char.h"
#if defined(__SSE2__) && !defined(AO_USE_PENTIUM4_INSTRS)
  /* "mfence" is a part of SSE2 set (introduced on Intel Pentium 4).    */
# define AO_USE_PENTIUM4_INSTRS
#endif
#if defined(AO_USE_PENTIUM4_INSTRS)
AO_INLINE void
AO_nop_full(void)
{
  /* A full memory barrier: "mfence" orders all prior loads and stores. */
  __asm__ __volatile__ ("mfence" : : : "memory");
}
#define AO_HAVE_nop_full

#else
/* We could use the cpuid instruction.  But that seems to be slower     */
/* than the default implementation based on test_and_set_full.  Thus    */
/* we omit that bit of misinformation here.                             */
#endif /* !AO_USE_PENTIUM4_INSTRS */
/* As far as we can tell, the lfence and sfence instructions are not    */
/* currently needed or useful for cached memory accesses.               */
/* Really only works for 486 and later */
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
  AO_t result;

  /* "lock; xadd" atomically adds incr to *p and yields the old value.  */
  __asm__ __volatile__ ("lock; xadd %0, %1"
                        : "=r" (result), "+m" (*p)
                        : "0" (incr)
                        : "memory");
  return result;
}
#define AO_HAVE_fetch_and_add_full
#endif /* !AO_PREFER_GENERALIZED */
/* AO_store_full could be implemented directly using "xchg" but it      */
/* could be generalized efficiently as an ordinary store accomplished   */
/* with AO_nop_full ("mfence" instruction).                             */
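/* A minimal sketch (illustration only) of the direct "xchg" variant    */
/* mentioned above; the name AO_store_full_xchg_example is hypothetical */
/* and not part of this API.                                            */
#if 0
  AO_INLINE void
  AO_store_full_xchg_example(volatile AO_t *addr, AO_t val)
  {
    /* "xchg" with a memory operand is implicitly locked, so the swap   */
    /* itself acts as a full barrier.                                   */
    __asm__ __volatile__ ("xchg %0, %1"
                          : "+r" (val), "+m" (*addr)
                          : : "memory");
  }
#endif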
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
  unsigned char oldval;
  /* Note: the "xchg" instruction does not need a "lock" prefix */
__asm__ __volatile__ ("xchgb %0, %1"
: "=q" (oldval), "+m" (*addr)
: "0" ((unsignedchar)0xff)
: "memory"); return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full
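/* Usage sketch (illustration only): a trivial spinlock built on the    */
/* primitive above; the variable and function names are hypothetical.   */
#if 0
  static volatile AO_TS_t AO_example_lock = AO_TS_INITIALIZER;

  static void
  AO_example_acquire(void)
  {
    while (AO_test_and_set_full(&AO_example_lock) == AO_TS_SET) {
      /* spin until the previous value is AO_TS_CLEAR */
    }
  }

  static void
  AO_example_release(void)
  {
    AO_CLEAR(&AO_example_lock); /* release store of AO_TS_CLEAR */
  }
#endif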
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
  /* Returns nonzero if the comparison succeeded. */
  AO_INLINE int
  AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
  {
#   ifdef AO_USE_SYNC_CAS_BUILTIN
      return (int)__sync_bool_compare_and_swap(addr, old, new_val
                                        /* empty protection list */);
      /* Note: an empty list of variables protected by the      */
      /* memory barrier should mean all globally accessible     */
      /* variables are protected.                               */
#   else
      char result;
#     if defined(__GCC_ASM_FLAG_OUTPUTS__)
        AO_t dummy;

        /* "lock; cmpxchg" sets ZF on success; the "=@ccz" output       */
        /* captures the flag directly.                                  */
        __asm__ __volatile__ ("lock; cmpxchg %3, %0"
                              : "+m" (*addr), "=@ccz" (result), "=a" (dummy)
                              : "r" (new_val), "a" (old)
                              : "memory");
#     else
        __asm__ __volatile__ ("lock; cmpxchg %2, %0; setz %1"
                              : "+m" (*addr), "=a" (result)
                              : "r" (new_val), "a" (old)
                              : "memory");
#     endif
      return (int)result;
#   endif
  }
# define AO_HAVE_compare_and_swap_full
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
/* Real X86 implementations, except for some old 32-bit WinChips,       */
/* appear to enforce ordering between memory operations, EXCEPT that    */
/* a later read can pass earlier writes, presumably due to the          */
/* visible presence of store buffers.                                   */
/* We ignore both the WinChips and the fact that the official specs     */
/* seem to be much weaker (and arguably too weak to be usable).         */
# include "../ordered_except_wr.h"
#elif !defined(__x86_64__) && (!defined(AO_USE_SYNC_CAS_BUILTIN) \
      || defined(AO_GCC_ATOMIC_TEST_AND_SET))
# include "../standard_ao_double_t.h"
  /* Reading or writing a quadword aligned on a 64-bit boundary is      */
  /* always carried out atomically on at least a Pentium according to   */
  /* Chapter 8.1.1 of Volume 3A Part 1 of Intel processor manuals.      */
# ifndef AO_PREFER_GENERALIZED
#   define AO_ACCESS_double_CHECK_ALIGNED
#   include "../loadstore/double_atomic_load_store.h"
# endif
  /* Returns nonzero if the comparison succeeded. */
  /* Really requires at least a Pentium.          */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
  {
    char result;
#   if defined(__PIC__) && !(AO_GNUC_PREREQ(5, 1) || AO_CLANG_PREREQ(4, 0))
AO_t saved_ebx;
AO_t dummy;
      /* The following applies to an ancient GCC (and, probably, it was */
      /* never needed for Clang):                                       */
      /* If PIC is turned on, we cannot use ebx as it is reserved for   */
      /* the GOT pointer.  We should save and restore ebx.  The         */
      /* proposed solution is not so efficient as the older             */
      /* alternatives using push ebx or edi as new_val1 (w/o clobbering */
      /* edi and temporary local variable usage) but it is more         */
      /* portable (it works even if ebx is not used as GOT pointer, and */
      /* it works for the buggy GCC releases that incorrectly evaluate  */
      /* memory operands offset in the inline assembly after push).     */
#     ifdef __OPTIMIZE__
__asm__ __volatile__("mov %%ebx, %2\n\t"/* save ebx */ "lea %0, %%edi\n\t"/* in case addr is in ebx */ "mov %7, %%ebx\n\t"/* load new_val1 */ "lock; cmpxchg8b (%%edi)\n\t" "mov %2, %%ebx\n\t"/* restore ebx */ "setz %1"
: "+m" (*addr), "=a" (result), "=m" (saved_ebx), "=d" (dummy)
: "d" (old_val2), "a" (old_val1), "c" (new_val2), "m" (new_val1)
: "%edi", "memory"); # else /* A less-efficient code manually preserving edi if GCC invoked */ /* with -O0 option (otherwise it fails while finding a register */ /* in class 'GENERAL_REGS'). */
AO_t saved_edi;
__asm__ __volatile__("mov %%edi, %3\n\t"/* save edi */ "mov %%ebx, %2\n\t"/* save ebx */ "lea %0, %%edi\n\t"/* in case addr is in ebx */ "mov %8, %%ebx\n\t"/* load new_val1 */ "lock; cmpxchg8b (%%edi)\n\t" "mov %2, %%ebx\n\t"/* restore ebx */ "mov %3, %%edi\n\t"/* restore edi */ "setz %1"
: "+m" (*addr), "=a" (result), "=m" (saved_ebx), "=m" (saved_edi), "=d" (dummy)
: "d" (old_val2), "a" (old_val1), "c" (new_val2), "m" (new_val1)
: "memory"); # endif # else /* For non-PIC mode, this operation could be simplified (and be */ /* faster) by using ebx as new_val1. Reuse of the PIC hard */ /* register, instead of using a fixed register, is implemented */ /* in Clang and GCC 5.1+, at least. (Older GCC refused to compile */ /* such code for PIC mode). */ # ifdefined(__GCC_ASM_FLAG_OUTPUTS__)
__asm__ __volatile__ ("lock; cmpxchg8b %0"
: "+m" (*addr), "=@ccz" (result), "+d" (old_val2), "+a" (old_val1)
: "c" (new_val2), "b" (new_val1)
: "memory"); # else
        AO_t dummy; /* an output for clobbered edx */

        __asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1"
                              : "+m" (*addr), "=a" (result), "=d" (dummy)
                              : "d" (old_val2), "a" (old_val1),
                                "c" (new_val2), "b" (new_val1)
                              : "memory");
#     endif
#   endif /* !__PIC__ */
    return (int) result;
  }
# define AO_HAVE_compare_double_and_swap_double_full
#elif defined(__ILP32__) || !defined(__x86_64__)
# include "../standard_ao_double_t.h"
  /* Reading or writing a quadword aligned on a 64-bit boundary is      */
  /* always carried out atomically (requires at least a Pentium).       */
# ifndef AO_PREFER_GENERALIZED
#   define AO_ACCESS_double_CHECK_ALIGNED
#   include "../loadstore/double_atomic_load_store.h"
# endif
  /* X32 has native support for 64-bit integer operations (AO_double_t  */
  /* is a 64-bit integer and we could use 64-bit cmpxchg).              */
  /* This primitive is used by compare_double_and_swap_double_full.     */
AO_INLINE int
AO_double_compare_and_swap_full(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
  {
    /* It is safe to use __sync CAS built-in here. */
    return __sync_bool_compare_and_swap(&addr->AO_whole,
                                        old_val.AO_whole, new_val.AO_whole
                                        /* empty protection list */);
  }
# define AO_HAVE_double_compare_and_swap_full
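  /* Usage sketch (illustration only): atomically replacing a pair via  */
  /* the primitive above; all names here are hypothetical.              */
#if 0
  static volatile AO_double_t AO_example_pair;

  static void
  AO_example_set_pair(AO_t val1, AO_t val2)
  {
    AO_double_t old_val, new_val;

    do {
      old_val = AO_double_load(&AO_example_pair);
      new_val.AO_val1 = val1;
      new_val.AO_val2 = val2;
    } while (!AO_double_compare_and_swap_full(&AO_example_pair,
                                              old_val, new_val));
  }
#endif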
#elif defined(AO_CMPXCHG16B_AVAILABLE) \
      || (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) \
          && !defined(AO_THREAD_SANITIZER))
# include "../standard_ao_double_t.h"
  /* The Intel and AMD Architecture Programmer Manuals state roughly    */
  /* the following:                                                     */
  /* - CMPXCHG16B (with a LOCK prefix) can be used to perform 16-byte   */
  /*   atomic accesses in 64-bit mode (with certain alignment           */
  /*   restrictions);                                                   */
  /* - SSE instructions that access data larger than a quadword (like   */
  /*   MOVDQA) may be implemented using multiple memory accesses;       */
  /* - LOCK prefix causes an invalid-opcode exception when used with    */
  /*   128-bit media (SSE) instructions.                                */
  /* Thus, currently, the only way to implement lock-free double_load   */
  /* and double_store on x86_64 is to use CMPXCHG16B (if available).    */
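  /* A hedged sketch of the CMPXCHG16B-based double-word CAS this       */
  /* branch relies on (assuming the standard_ao_double_t.h layout);     */
  /* the actual implementation may differ in constraint details, and    */
  /* the function name here is hypothetical.                            */
#if 0
  AO_INLINE int
  AO_compare_double_and_swap_double_example(volatile AO_double_t *addr,
                                            AO_t old_val1, AO_t old_val2,
                                            AO_t new_val1, AO_t new_val2)
  {
    char result;

    /* rdx:rax holds the expected value, rcx:rbx the new one; ZF        */
    /* reports whether the 16-byte exchange took place.                 */
    __asm__ __volatile__ ("lock; cmpxchg16b %0; setz %1"
                          : "+m" (*addr), "=q" (result),
                            "+d" (old_val2), "+a" (old_val1)
                          : "c" (new_val2), "b" (new_val1)
                          : "memory");
    return (int)result;
  }
#endif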
  /* NEC LE-IT: older AMD Opterons are missing this instruction.        */
  /* On these machines SIGILL will be thrown.                           */
  /* Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated (lock      */
  /* based) version available.                                          */
  /* HB: Changed this to not define either by default.  There are       */
  /* enough machines and tool chains around on which cmpxchg16b         */
  /* doesn't work.  And the emulation is unsafe by our usual rules.     */
  /* However, both are clearly useful in certain cases.                 */
#elif defined(AO_WEAK_DOUBLE_CAS_EMULATION)
# include "../standard_ao_double_t.h"
# ifdef __cplusplus
    extern "C" {
# endif
  /* This one provides spinlock based emulation of CAS implemented in   */
  /* atomic_ops.c.  We probably do not want to do this here, since it   */
  /* is not atomic with respect to other kinds of updates of *addr.     */
  /* On the other hand, this may be a useful facility on occasion.      */
  int AO_compare_double_and_swap_double_emulation(
                                                volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
                                                AO_t new_val1, AO_t new_val2);

# ifdef __cplusplus
    } /* extern "C" */
# endif

# define AO_compare_double_and_swap_double_full(addr, old1, old2, \
                                                newv1, newv2) \
        AO_compare_double_and_swap_double_emulation(addr, old1, old2, \
                                                    newv1, newv2)
# define AO_HAVE_compare_double_and_swap_double_full
#endif /* AO_WEAK_DOUBLE_CAS_EMULATION */