/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2017-2020 Evan Nemerson <evan@nemerson.com>
 *   2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
 *   2015      Brandon Rowlett <browlett@nvidia.com>
 *   2015      Ken Fast <kfast@gdeb.com>
 */


#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H

#include "mmx.h"

#if defined(_WIN32) && !defined(SIMDE_X86_SSE_NATIVE) && defined(_MSC_VER)
  #define NOMINMAX
  #include <windows.h>
#endif

#if defined(__ARM_ACLE)
  #include <arm_acle.h>
#endif

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_

typedef union {
  #if defined(SIMDE_VECTOR_SUBSCRIPT)
    SIMDE_ALIGN_TO_16 int8_t          i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
    SIMDE_ALIGN_TO_16 int16_t        i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
    SIMDE_ALIGN_TO_16 int32_t        i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
    SIMDE_ALIGN_TO_16 int64_t        i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
    SIMDE_ALIGN_TO_16 uint8_t         u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
    SIMDE_ALIGN_TO_16 uint16_t       u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
    SIMDE_ALIGN_TO_16 uint32_t       u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
    SIMDE_ALIGN_TO_16 uint64_t       u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
    #if defined(SIMDE_HAVE_INT128_)
    SIMDE_ALIGN_TO_16 simde_int128  i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
    SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
    #endif
    SIMDE_ALIGN_TO_16 simde_float32  f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
    SIMDE_ALIGN_TO_16 int_fast32_t  i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
    SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  #else
    SIMDE_ALIGN_TO_16 int8_t         i8[16];
    SIMDE_ALIGN_TO_16 int16_t        i16[8];
    SIMDE_ALIGN_TO_16 int32_t        i32[4];
    SIMDE_ALIGN_TO_16 int64_t        i64[2];
    SIMDE_ALIGN_TO_16 uint8_t        u8[16];
    SIMDE_ALIGN_TO_16 uint16_t       u16[8];
    SIMDE_ALIGN_TO_16 uint32_t       u32[4];
    SIMDE_ALIGN_TO_16 uint64_t       u64[2];
    #if defined(SIMDE_HAVE_INT128_)
    SIMDE_ALIGN_TO_16 simde_int128  i128[1];
    SIMDE_ALIGN_TO_16 simde_uint128 u128[1];
    #endif
    SIMDE_ALIGN_TO_16 simde_float32  f32[4];
    SIMDE_ALIGN_TO_16 int_fast32_t  i32f[16 / sizeof(int_fast32_t)];
    SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
  #endif

    SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
    SIMDE_ALIGN_TO_16 simde__m64         m64[2];

  #if defined(SIMDE_X86_SSE_NATIVE)
    SIMDE_ALIGN_TO_16 __m128         n;
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_ALIGN_TO_16 int8x16_t      neon_i8;
    SIMDE_ALIGN_TO_16 int16x8_t      neon_i16;
    SIMDE_ALIGN_TO_16 int32x4_t      neon_i32;
    SIMDE_ALIGN_TO_16 int64x2_t      neon_i64;
    SIMDE_ALIGN_TO_16 uint8x16_t     neon_u8;
    SIMDE_ALIGN_TO_16 uint16x8_t     neon_u16;
    SIMDE_ALIGN_TO_16 uint32x4_t     neon_u32;
    SIMDE_ALIGN_TO_16 uint64x2_t     neon_u64;
    SIMDE_ALIGN_TO_16 float32x4_t    neon_f32;
    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      SIMDE_ALIGN_TO_16 float64x2_t    neon_f64;
    #endif
  #elif defined(SIMDE_MIPS_MSA_NATIVE)
    v16i8 msa_i8;
    v8i16 msa_i16;
    v4i32 msa_i32;
    v2i64 msa_i64;
    v16u8 msa_u8;
    v8u16 msa_u16;
    v4u32 msa_u32;
    v2u64 msa_u64;
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    SIMDE_ALIGN_TO_16 v128_t         wasm_v128;
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char)      altivec_u8;
    SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short)     altivec_u16;
    SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int)       altivec_u32;
    SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char)        altivec_i8;
    SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short)       altivec_i16;
    SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int)         altivec_i32;
    SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float)              altivec_f32;
    #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
      SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
      SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long)   altivec_i64;
      SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double)             altivec_f64;
    #endif
  #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
    v16i8 lsx_i8;
    v8i16 lsx_i16;
    v4i32 lsx_i32;
    v2i64 lsx_i64;
    v16u8 lsx_u8;
    v8u16 lsx_u16;
    v4u32 lsx_u32;
    v2u64 lsx_u64;
    v4f32 lsx_f32;
    v2f64 lsx_f64;
  #endif
} simde__m128_private;

#if defined(SIMDE_X86_SSE_NATIVE)
  typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
  typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
  typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
  typedef v4f32 simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
  typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
  typedef simde__m128_private simde__m128;
#endif

#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  typedef simde__m128 __m128;
#endif

HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
  simde__m128 r;
  simde_memcpy(&r, &v, sizeof(r));
  return r;
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
  simde__m128_private r;
  simde_memcpy(&r, &v, sizeof(r));
  return r;
}
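
/* Illustrative note (not part of the upstream file): nearly every
 * implementation below follows this round-trip pattern: unpack the
 * opaque type into the private union, operate on whichever view the
 * target supports, then repack:
 *
 *   simde__m128_private r_, a_ = simde__m128_to_private(a);
 *   r_.f32[0] = a_.f32[0];                // use a convenient view
 *   return simde__m128_from_private(r_);  // repack via simde_memcpy
 */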

#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
  #endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */

#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)

  #if defined(SIMDE_BUG_GCC_95782)
    SIMDE_FUNCTION_ATTRIBUTES
    SIMDE_POWER_ALTIVEC_VECTOR(float)
    simde__m128_to_altivec_f32(simde__m128 value) {
      simde__m128_private r_ = simde__m128_to_private(value);
      return r_.altivec_f32;
    }

    SIMDE_FUNCTION_ATTRIBUTES
    simde__m128
    simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
      simde__m128_private r_;
      r_.altivec_f32 = value;
      return simde__m128_from_private(r_);
    }
  #else
    SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
  #endif

  #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
    SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
  #endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128)
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */

#if defined(SIMDE_LOONGARCH_LSX_NATIVE)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v16i8, lsx, i8)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v8i16, lsx, i16)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v4i32, lsx, i32)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v2i64, lsx, i64)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v16u8, lsx, u8)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v8u16, lsx, u16)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v4u32, lsx, u32)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v2u64, lsx, u64)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v4f32, lsx, f32)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v2f64, lsx, f64)
#endif /* defined(SIMDE_LOONGARCH_LSX_NATIVE) */

enum {
  #if defined(SIMDE_X86_SSE_NATIVE)
    SIMDE_MM_ROUND_NEAREST     = _MM_ROUND_NEAREST,
    SIMDE_MM_ROUND_DOWN        = _MM_ROUND_DOWN,
    SIMDE_MM_ROUND_UP          = _MM_ROUND_UP,
    SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
  #else
    SIMDE_MM_ROUND_NEAREST     = 0x0000,
    SIMDE_MM_ROUND_DOWN        = 0x2000,
    SIMDE_MM_ROUND_UP          = 0x4000,
    SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
  #endif
};
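
/* Illustrative note: these values mirror the x86 MXCSR rounding-control
 * field (bits 13-14), so each SIMDE_MM_ROUND_* value is the matching
 * SIMDE_MM_FROUND_TO_* value shifted left by 13:
 *
 *   SIMDE_MM_ROUND_DOWN        == 0x2000 == SIMDE_MM_FROUND_TO_NEG_INF << 13
 *   SIMDE_MM_ROUND_TOWARD_ZERO == 0x6000 == SIMDE_MM_FROUND_TO_ZERO    << 13
 *
 * simde_x_mm_round_ps below relies on this relationship when it maps the
 * current rounding mode back to a rounding-control value. */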

#if defined(_MM_FROUND_TO_NEAREST_INT)
#  define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
#  define SIMDE_MM_FROUND_TO_NEG_INF     _MM_FROUND_TO_NEG_INF
#  define SIMDE_MM_FROUND_TO_POS_INF     _MM_FROUND_TO_POS_INF
#  define SIMDE_MM_FROUND_TO_ZERO        _MM_FROUND_TO_ZERO
#  define SIMDE_MM_FROUND_CUR_DIRECTION  _MM_FROUND_CUR_DIRECTION

#  define SIMDE_MM_FROUND_RAISE_EXC      _MM_FROUND_RAISE_EXC
#  define SIMDE_MM_FROUND_NO_EXC         _MM_FROUND_NO_EXC
#else
#  define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
#  define SIMDE_MM_FROUND_TO_NEG_INF     0x01
#  define SIMDE_MM_FROUND_TO_POS_INF     0x02
#  define SIMDE_MM_FROUND_TO_ZERO        0x03
#  define SIMDE_MM_FROUND_CUR_DIRECTION  0x04

#  define SIMDE_MM_FROUND_RAISE_EXC      0x00
#  define SIMDE_MM_FROUND_NO_EXC         0x08
#endif

#define SIMDE_MM_FROUND_NINT \
  (SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
  (SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
  (SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
  (SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
  (SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
  (SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)

#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT)
#  define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
#  define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
#  define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
#  define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
#  define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
#  define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
#  define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
#  define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
#  define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
#  define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
#  define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
#  define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif

#if defined(_MM_EXCEPT_INVALID)
#  define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
#  define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
#  define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
#  define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
#  define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
#  define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
#  define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
#  define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
#  define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
#  define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
#  define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
#  define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
#  define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
#  define SIMDE_MM_EXCEPT_MASK \
     (SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
      SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
      SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _MM_EXCEPT_INVALID SIMDE_MM_EXCEPT_INVALID
  #define _MM_EXCEPT_DENORM SIMDE_MM_EXCEPT_DENORM
  #define _MM_EXCEPT_DIV_ZERO SIMDE_MM_EXCEPT_DIV_ZERO
  #define _MM_EXCEPT_OVERFLOW SIMDE_MM_EXCEPT_OVERFLOW
  #define _MM_EXCEPT_UNDERFLOW SIMDE_MM_EXCEPT_UNDERFLOW
  #define _MM_EXCEPT_INEXACT SIMDE_MM_EXCEPT_INEXACT
  #define _MM_EXCEPT_MASK SIMDE_MM_EXCEPT_MASK
#endif

#if defined(_MM_MASK_INVALID)
#  define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
#  define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
#  define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
#  define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
#  define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
#  define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
#  define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
#  define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
#  define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
#  define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
#  define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
#  define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
#  define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
#  define SIMDE_MM_MASK_MASK \
     (SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
      SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
      SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _MM_MASK_INVALID SIMDE_MM_MASK_INVALID
  #define _MM_MASK_DENORM SIMDE_MM_MASK_DENORM
  #define _MM_MASK_DIV_ZERO SIMDE_MM_MASK_DIV_ZERO
  #define _MM_MASK_OVERFLOW SIMDE_MM_MASK_OVERFLOW
  #define _MM_MASK_UNDERFLOW SIMDE_MM_MASK_UNDERFLOW
  #define _MM_MASK_INEXACT SIMDE_MM_MASK_INEXACT
  #define _MM_MASK_MASK SIMDE_MM_MASK_MASK
#endif

#if defined(_MM_FLUSH_ZERO_MASK)
#  define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
#  define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
#  define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
#  define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
#  define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
#  define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _MM_FLUSH_ZERO_MASK SIMDE_MM_FLUSH_ZERO_MASK
  #define _MM_FLUSH_ZERO_ON SIMDE_MM_FLUSH_ZERO_ON
  #define _MM_FLUSH_ZERO_OFF SIMDE_MM_FLUSH_ZERO_OFF
#endif

SIMDE_FUNCTION_ATTRIBUTES
unsigned int
SIMDE_MM_GET_ROUNDING_MODE(void) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _MM_GET_ROUNDING_MODE();
  #elif defined(SIMDE_HAVE_FENV_H)
    unsigned int vfe_mode;

    switch (fegetround()) {
      #if defined(FE_TONEAREST)
        case FE_TONEAREST:
          vfe_mode = SIMDE_MM_ROUND_NEAREST;
          break;
      #endif

      #if defined(FE_TOWARDZERO)
        case FE_TOWARDZERO:
          vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO;
          break;
      #endif

      #if defined(FE_UPWARD)
        case FE_UPWARD:
          vfe_mode = SIMDE_MM_ROUND_UP;
          break;
      #endif

      #if defined(FE_DOWNWARD)
        case FE_DOWNWARD:
          vfe_mode = SIMDE_MM_ROUND_DOWN;
          break;
      #endif

      default:
        vfe_mode = SIMDE_MM_ROUND_NEAREST;
        break;
    }

    return vfe_mode;
  #else
    return SIMDE_MM_ROUND_NEAREST;
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif

SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    _MM_SET_ROUNDING_MODE(a);
  #elif defined(SIMDE_HAVE_FENV_H)
    int fe_mode = FE_TONEAREST;

    switch (a) {
      #if defined(FE_TONEAREST)
        case SIMDE_MM_ROUND_NEAREST:
          fe_mode = FE_TONEAREST;
          break;
      #endif

      #if defined(FE_TOWARDZERO)
        case SIMDE_MM_ROUND_TOWARD_ZERO:
          fe_mode = FE_TOWARDZERO;
          break;
      #endif

      #if defined(FE_DOWNWARD)
        case SIMDE_MM_ROUND_DOWN:
          fe_mode = FE_DOWNWARD;
          break;
      #endif

      #if defined(FE_UPWARD)
        case SIMDE_MM_ROUND_UP:
          fe_mode = FE_UPWARD;
          break;
      #endif

      default:
        return;
    }

    fesetround(fe_mode);
  #else
    (void) a;
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
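
/* Illustrative usage (assuming the host provides <fenv.h>): the getter and
 * setter round-trip, so callers can save and restore the mode:
 *
 *   unsigned int saved = SIMDE_MM_GET_ROUNDING_MODE();
 *   SIMDE_MM_SET_ROUNDING_MODE(SIMDE_MM_ROUND_TOWARD_ZERO);
 *   ... code that depends on truncating rounding ...
 *   SIMDE_MM_SET_ROUNDING_MODE(saved);
 */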

SIMDE_FUNCTION_ATTRIBUTES
uint32_t
SIMDE_MM_GET_FLUSH_ZERO_MODE (void) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
  #else
    return SIMDE_MM_FLUSH_ZERO_OFF;
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _MM_GET_FLUSH_ZERO_MODE() SIMDE_MM_GET_FLUSH_ZERO_MODE()
#endif

SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_FLUSH_ZERO_MODE (uint32_t a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    _MM_SET_FLUSH_ZERO_MODE(a);
  #else
    (void) a;
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _MM_SET_FLUSH_ZERO_MODE(a) SIMDE_MM_SET_FLUSH_ZERO_MODE(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_mm_getcsr (void) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_getcsr();
  #else
    return SIMDE_MM_GET_ROUNDING_MODE();
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_getcsr() simde_mm_getcsr()
#endif

SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_setcsr (uint32_t a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    _mm_setcsr(a);
  #else
    SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_setcsr(a) simde_mm_setcsr(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
    SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15)
    SIMDE_REQUIRE_CONSTANT_RANGE(lax_rounding, 0, 1) {
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a);

  (void) lax_rounding;

  /* For architectures which lack a current direction SIMD instruction.
   *
   * Note that NEON actually has a current rounding mode instruction,
   * but in ARMv8+ the rounding mode is ignored and nearest is always
   * used, so we treat ARMv7 as having a rounding mode but ARMv8 as
   * not. */

  #if \
      defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
      defined(SIMDE_ARM_NEON_A32V8)
    if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
      rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) >> 13;
  #endif

  switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
    case SIMDE_MM_FROUND_CUR_DIRECTION:
      #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
        r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
      #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
        r_.neon_f32 = vrndiq_f32(a_.neon_f32);
      #elif defined(SIMDE_WASM_SIMD128_NATIVE)
        r_.wasm_v128 = wasm_f32x4_nearest(a_.wasm_v128);
      #elif defined(simde_math_nearbyintf)
        SIMDE_VECTORIZE
        for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
          r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
        }
      #else
        HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
      #endif
      break;

    case SIMDE_MM_FROUND_TO_NEAREST_INT:
      #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
        r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.altivec_f32));
      #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
        r_.neon_f32 = vrndnq_f32(a_.neon_f32);
      #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
        r_.lsx_i64 = __lsx_vfrintrne_s(a_.lsx_f32);
      #elif defined(SIMDE_WASM_SIMD128_NATIVE)
        r_.wasm_v128 = wasm_f32x4_nearest(a_.wasm_v128);
      #elif defined(simde_math_roundevenf)
        SIMDE_VECTORIZE
        for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
          r_.f32[i] = simde_math_roundevenf(a_.f32[i]);
        }
      #else
        HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
      #endif
      break;

    case SIMDE_MM_FROUND_TO_NEG_INF:
      #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
        r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32));
      #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
        r_.neon_f32 = vrndmq_f32(a_.neon_f32);
      #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
        r_.lsx_i64 = __lsx_vfrintrm_s(a_.lsx_f32);
      #elif defined(SIMDE_WASM_SIMD128_NATIVE)
        r_.wasm_v128 = wasm_f32x4_floor(a_.wasm_v128);
      #elif defined(simde_math_floorf)
        SIMDE_VECTORIZE
        for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
          r_.f32[i] = simde_math_floorf(a_.f32[i]);
        }
      #else
        HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
      #endif
      break;

    case SIMDE_MM_FROUND_TO_POS_INF:
      #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
        r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32));
      #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
        r_.neon_f32 = vrndpq_f32(a_.neon_f32);
      #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
        r_.lsx_i64 = __lsx_vfrintrp_s(a_.lsx_f32);
      #elif defined(SIMDE_WASM_SIMD128_NATIVE)
        r_.wasm_v128 = wasm_f32x4_ceil(a_.wasm_v128);
      #elif defined(simde_math_ceilf)
        SIMDE_VECTORIZE
        for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
          r_.f32[i] = simde_math_ceilf(a_.f32[i]);
        }
      #else
        HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
      #endif
      break;

    case SIMDE_MM_FROUND_TO_ZERO:
      #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
        r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32));
      #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
        r_.neon_f32 = vrndq_f32(a_.neon_f32);
      #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
        r_.lsx_i64 = __lsx_vfrintrz_s(a_.lsx_f32);
      #elif defined(SIMDE_WASM_SIMD128_NATIVE)
        r_.wasm_v128 = wasm_f32x4_trunc(a_.wasm_v128);
      #elif defined(simde_math_truncf)
        SIMDE_VECTORIZE
        for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
          r_.f32[i] = simde_math_truncf(a_.f32[i]);
        }
      #else
        HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
      #endif
      break;

    default:
      HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
  }

  return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE4_1_NATIVE)
  #define simde_mm_round_ps(a, rounding) _mm_round_ps((a), (rounding))
#else
  #define simde_mm_round_ps(a, rounding) simde_x_mm_round_ps((a), (rounding), 0)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
  #define _mm_round_ps(a, rounding) simde_mm_round_ps((a), (rounding))
#endif
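
/* Illustrative example: SIMDE_MM_FROUND_TO_NEAREST_INT rounds halfway cases
 * to even (roundevenf in the portable fallback), matching SSE4.1:
 *
 *   simde__m128 x = simde_mm_set_ps(2.5f, -1.5f, 0.5f, 1.2f);
 *   simde__m128 r = simde_mm_round_ps(x, SIMDE_MM_FROUND_TO_NEAREST_INT);
 *   r holds { 1.0f, 0.0f, -2.0f, 2.0f } (element 0 first)
 */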

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_set_ps(e3, e2, e1, e0);
  #else
    simde__m128_private r_;

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      SIMDE_ALIGN_TO_16 simde_float32 data[4] = { e0, e1, e2, e3 };
      r_.neon_f32 = vld1q_f32(data);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
    #else
      r_.f32[0] = e0;
      r_.f32[1] = e1;
      r_.f32[2] = e2;
      r_.f32[3] = e3;
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps1 (simde_float32 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_set_ps1(a);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vdupq_n_f32(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
    (void) a;
    return vec_splats(a);
  #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
    return (simde__m128)__lsx_vldrepl_w(&a, 0);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_f32x4_splat(a);
  #else
    return simde_mm_set_ps(a, a, a, a);
  #endif
}
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_set_ps1(a) simde_mm_set_ps1(a)
#  define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_move_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_move_ss(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) m = { ~0U, 0U, 0U, 0U };
      r_.altivec_f32 = vec_sel(a_.altivec_f32, b_.altivec_f32, m);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_i8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vextrins_w(a_.lsx_i64, b_.lsx_i64, 0);
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
    #else
      r_.f32[0] = b_.f32[0];
      r_.f32[1] = a_.f32[1];
      r_.f32[2] = a_.f32[2];
      r_.f32[3] = a_.f32[3];
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
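
/* Illustrative example: only lane 0 of the result comes from b; lanes 1-3
 * are kept from a:
 *
 *   simde__m128 a = simde_mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
 *   simde__m128 b = simde_mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);
 *   simde__m128 r = simde_mm_move_ss(a, b);  // lanes { 5.0f, 2.0f, 3.0f, 4.0f }
 */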

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_broadcastlow_ps(simde__m128 a) {
  /* This function broadcasts the first element in the input vector to
   * all lanes.  It is used to avoid generating spurious exceptions in
   * *_ss functions since there may be garbage in the upper lanes. */

  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_shuffle_ps(a, a, 0);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      r_.neon_f32 = vdupq_laneq_f32(a_.neon_f32, 0);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = vec_splat(a_.altivec_f32, 0);
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vreplvei_w(a_.lsx_i64, 0);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_splat(a_.f32[0]);
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.f32[i] = a_.f32[0];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
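
/* Illustrative note: the *_ss wrappers below combine this with
 * simde_mm_move_ss when SIMDE_FAST_EXCEPTIONS is not defined, so the
 * full-width operation only ever sees copies of lane 0 and cannot fault
 * on garbage in lanes 1-3:
 *
 *   simde_mm_move_ss(a, simde_mm_add_ps(simde_x_mm_broadcastlow_ps(a),
 *                                       simde_x_mm_broadcastlow_ps(b)));
 */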

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_add_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_f32 = __lsx_vfadd_s(a_.lsx_f32, b_.lsx_f32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.f32 = a_.f32 + b_.f32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.f32[i] = a_.f32[i] + b_.f32[i];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_add_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
    return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_add_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
      float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
      /* The upper lanes of the result must keep the values from <a>. */
      r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
    #else
      r_.f32[0] = a_.f32[0] + b_.f32[0];
      r_.f32[1] = a_.f32[1];
      r_.f32[2] = a_.f32[2];
      r_.f32[3] = a_.f32[3];
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_and_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_and_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vand_v(a_.lsx_i64, b_.lsx_i64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = a_.i32 & b_.i32;
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = a_.i32[i] & b_.i32[i];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_andnot_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_andnot_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
      r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vandn_v(a_.lsx_i64, b_.lsx_i64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = ~a_.i32 & b_.i32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_xor_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vxor_v(a_.lsx_i64, b_.lsx_i64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32f = a_.i32f ^ b_.i32f;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        r_.u32[i] = a_.u32[i] ^ b_.u32[i];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_or_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vor_v(a_.lsx_i64, b_.lsx_i64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32f = a_.i32f | b_.i32f;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        r_.u32[i] = a_.u32[i] | b_.u32[i];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE)
    __m128i ai = _mm_castps_si128(a);
    return _mm_castsi128_ps(_mm_ternarylogic_epi32(ai, ai, ai, 0x55));
  #elif defined(SIMDE_X86_SSE2_NATIVE)
    /* Note: we use ints instead of floats because we don't want cmpeq
     * to return false for (NaN, NaN) */

    __m128i ai = _mm_castps_si128(a);
    return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i32 = vmvnq_s32(a_.neon_i32);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vnor_v(a_.lsx_i64, a_.lsx_i64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = ~a_.i32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = ~(a_.i32[i]);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
  /* This function is for when you want to blend two elements together
   * according to a mask.  It is similar to _mm_blendv_ps, except that
   * it is undefined whether the blend is based on the highest bit in
   * each lane (like blendv) or just bitwise operations.  This allows
   * us to implement the function efficiently everywhere.
   *
   * Basically, you promise that all the lanes in mask are either 0 or
   * ~0. */

  #if defined(SIMDE_X86_SSE4_1_NATIVE)
    return _mm_blendv_ps(a, b, mask);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b),
      mask_ = simde__m128_to_private(mask);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
      r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vbitsel_v(a_.lsx_i64, b_.lsx_i64, mask_.lsx_i64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
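
/* Illustrative usage: the comparison functions in this file return lanes
 * that are exactly 0 or ~0, so their results are valid masks here. For
 * example, a per-lane minimum (hypothetical helper, not part of SIMDe):
 *
 *   simde__m128 example_min_ps(simde__m128 a, simde__m128 b) {
 *     return simde_x_mm_select_ps(a, b, simde_mm_cmplt_ps(b, a));
 *   }
 */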

SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_avg_pu16(a, b);
  #else
    simde__m64_private
      r_,
      a_ = simde__m64_to_private(a),
      b_ = simde__m64_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100761)
      uint32_t wa SIMDE_VECTOR(16);
      uint32_t wb SIMDE_VECTOR(16);
      uint32_t wr SIMDE_VECTOR(16);
      SIMDE_CONVERT_VECTOR_(wa, a_.u16);
      SIMDE_CONVERT_VECTOR_(wb, b_.u16);
      wr = (wa + wb + 1) >> 1;
      SIMDE_CONVERT_VECTOR_(r_.u16, wr);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
        r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
      }
    #endif

    return simde__m64_from_private(r_);
  #endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
#  define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
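
/* Illustrative note: the scalar fallback computes a rounding average,
 * (a + b + 1) >> 1, which rounds exact halves up; e.g. for lanes 3 and 4,
 * (3 + 4 + 1) >> 1 == 4, matching the x86 PAVGW instruction. */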

SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_avg_pu8(a, b);
  #else
    simde__m64_private
      r_,
      a_ = simde__m64_to_private(a),
      b_ = simde__m64_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100761)
      uint16_t wa SIMDE_VECTOR(16);
      uint16_t wb SIMDE_VECTOR(16);
      uint16_t wr SIMDE_VECTOR(16);
      SIMDE_CONVERT_VECTOR_(wa, a_.u8);
      SIMDE_CONVERT_VECTOR_(wb, b_.u8);
      wr = (wa + wb + 1) >> 1;
      SIMDE_CONVERT_VECTOR_(r_.u8, wr);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
      }
    #endif

    return simde__m64_from_private(r_);
  #endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
#  define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    simde_float32 mask_;
    uint32_t u32_ = UINT32_C(0x7FFFFFFF);
    simde_memcpy(&mask_, &u32_, sizeof(u32_));
    return _mm_and_ps(_mm_set1_ps(mask_), a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vabsq_f32(a_.neon_f32);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
      r_.altivec_f32 = vec_abs(a_.altivec_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.f32[i] = simde_math_fabsf(a_.f32[i]);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
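
/* Illustrative note: on native SSE the absolute value is taken by clearing
 * the IEEE 754 sign bit, ANDing every lane with 0x7FFFFFFF; the
 * simde_memcpy above is just a strict-aliasing-safe way of materializing
 * that bit pattern as a float. */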

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpeq_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vfcmp_ceq_s(a_.lsx_f32, b_.lsx_f32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
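
/* Illustrative example: comparisons produce integer lane masks rather than
 * floats; equal lanes become all ones, and NaN never compares equal, even
 * to itself:
 *
 *   simde__m128 x = simde_mm_set_ps1(1.0f);
 *   simde__m128 m = simde_mm_cmpeq_ps(x, x);
 *   every u32 lane of m is 0xFFFFFFFF
 */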

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpeq_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
    return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmpeq_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpge_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vfcmp_cle_s(b_.lsx_f32, a_.lsx_f32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
    return _mm_cmpge_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
    return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmpge_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpgt_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vfcmp_clt_s(b_.lsx_f32, a_.lsx_f32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
    return _mm_cmpgt_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
    return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmpgt_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmple_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vfcmp_cle_s(a_.lsx_f32, b_.lsx_f32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmple_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
    return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmple_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmplt_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vfcmp_clt_s(a_.lsx_f32, b_.lsx_f32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmplt_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
    return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmplt_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpneq_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32));
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vfcmp_cune_s(a_.lsx_f32, b_.lsx_f32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpneq_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
    return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmpneq_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) {
  return simde_mm_cmplt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) {
  return simde_mm_cmplt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) {
  return simde_mm_cmple_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) {
  return simde_mm_cmple_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) {
  return simde_mm_cmpgt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) {
  return simde_mm_cmpgt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) {
  return simde_mm_cmpge_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) {
  return simde_mm_cmpge_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpord_ps(a, b);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* Note: NEON has no ordered-compare builtin.  Compare a == a and
       * b == b to detect NaN in either input, then AND the two results
       * to produce the final ordered mask. */

      uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
      r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128));
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
          vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vfcmp_cun_s(a_.lsx_f32, b_.lsx_f32);
      r_.lsx_i64 = __lsx_vnor_v(r_.lsx_i64, r_.lsx_i64);
    #elif defined(simde_math_isnanf)
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
      }
    #else
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif

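/* Illustrative sketch (hypothetical helper, not part of the SIMDe
 * API): a common use of the ordered compare is to zero every lane
 * holding NaN, since only non-NaN values compare equal to themselves. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_example_zero_nan_lanes (simde__m128 v) {
  /* simde_mm_cmpord_ps(v, v) is all-ones exactly in the non-NaN lanes. */
  return simde_mm_and_ps(v, simde_mm_cmpord_ps(v, v));
}
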
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpunord_ps(a, b);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
      r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128));
    #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
          vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
          vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
      r_.altivec_f32 = vec_nor(r_.altivec_f32, r_.altivec_f32);
    #elif defined(SIMDE_LOONGARCH_LSX_NATIVE)
      r_.lsx_i64 = __lsx_vfcmp_cun_s(a_.lsx_f32, b_.lsx_f32);
    #elif defined(simde_math_isnanf)
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #else
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
    return _mm_cmpunord_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
    return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmpunord_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(simde_math_isnanf)
      r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
      SIMDE_VECTORIZE
      for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        r_.u32[i] = a_.u32[i];
      }
    #else
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif

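/* Illustrative sketch (hypothetical helper, not part of the SIMDe
 * API): comparing a value against itself with the unordered predicate
 * yields an all-ones lane-0 mask exactly when lane 0 is NaN, giving a
 * branch-free scalar isnan test. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_x_example_f32_isnan (simde__m128 v) {
  simde__m128_private r_ = simde__m128_to_private(simde_mm_cmpunord_ss(v, v));
  return r_.u32[0] != 0;
}
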
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comieq_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
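      /* COMISS reports unordered inputs with ZF set, so the native
       * comieq yields 1 when either input is NaN; OR-ing the NaN mask
       * into the equality mask reproduces that behaviour here. */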
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
      uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
      return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      return wasm_f32x4_extract_lane(a_.wasm_v128, 0) == wasm_f32x4_extract_lane(b_.wasm_v128, 0);
    #else
      return a_.f32[0] == b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comige_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
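      /* Unordered inputs must report "not greater-or-equal", so the
       * compare result is AND-ed with the not-NaN masks of both
       * inputs. */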
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
      uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
      return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      return wasm_f32x4_extract_lane(a_.wasm_v128, 0) >= wasm_f32x4_extract_lane(b_.wasm_v128, 0);
    #else
      return a_.f32[0] >= b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comigt_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
      uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
      return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      return wasm_f32x4_extract_lane(a_.wasm_v128, 0) > wasm_f32x4_extract_lane(b_.wasm_v128, 0);
    #else
      return a_.f32[0] > b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
