// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#if ((defined EIGEN_VECTORIZE_AVX) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_MINGW) && (__GXX_ABI_VERSION < 1004)) || EIGEN_OS_QNX
// With GCC's default ABI version, __m128 and __m256 are the same type, and therefore we cannot
// have overloads for both types without a linking error.
// One solution is to increase the ABI version using -fabi-version=4 (or greater).
// Otherwise, we work around this inconvenience by wrapping 128-bit types into the following helper
// structure:
typedef eigen_packet_wrapper<__m128>  Packet4f;
typedef eigen_packet_wrapper<__m128d> Packet2d;
#else
typedef __m128  Packet4f;
typedef __m128d Packet2d;
#endif
template<int p, int q, int r, int s>
struct shuffle_mask {
  enum { mask = (s)<<6 | (r)<<4 | (q)<<2 | (p) };
};
// TODO: change the implementation of all swizzle* ops from macros to templates.
#define vec4f_swizzle1(v,p,q,r,s) \
Packet4f(_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), (shuffle_mask<p,q,r,s>::mask))))
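// For example, vec4f_swizzle1(v,0,0,0,0) broadcasts lane 0 to all four lanes, and
// vec4f_swizzle1(v,3,2,1,0) reverses the order of the lanes.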
// GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction.
// However, using intrinsics for pset1 makes gcc generate crappy code in some cases (see bug 203).
// Using inline assembly is also not an option because then gcc fails to properly reorder the instructions.
// Therefore, we introduced the pload1 functions to be used in product kernels for which bug 203 does not apply.
// Also note that with AVX, we want it to generate a vbroadcastss.
#if EIGEN_COMP_GNUC_STRICT && (!defined __AVX__)
template<> EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float *from) {
  return vec4f_swizzle1(_mm_load_ss(from),0,0,0,0);
}
#endif
template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)  { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a)    { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }
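// Note: plset(a) returns the linearly spaced packet {a, a+1, a+2, a+3} ({a, a+1} for
// Packet2d); _mm_set_ps and friends list lanes from highest to lowest, hence the
// reversed argument order.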
template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }
template<> EIGEN_STRONG_INLINE Packet16b padd<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_or_si128(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }
template<> EIGEN_STRONG_INLINE Packet16b psub<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_xor_si128(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_mullo_epi32(a,b);
#else
  // this version is slightly faster than 4 scalar products
  return vec4i_swizzle1(
vec4i_swizzle2(
_mm_mul_epu32(a,b),
_mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
vec4i_swizzle1(b,1,0,3,2)),
0,2,0,2),
            0,2,1,3);
#endif
}
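// How the SSE2 fallback above works: _mm_mul_epu32 multiplies the even-indexed 32-bit
// lanes into 64-bit products, and the swizzles of a and b move the odd lanes into even
// positions so a second _mm_mul_epu32 covers them; the last two swizzles then gather
// the low 32 bits of all four products back into their original lane order.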
template<> EIGEN_STRONG_INLINE Packet16b pmul<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_and_si128(a,b); }
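// For boolean packets the arithmetic ops in effect reduce to bitwise logic:
// padd is a saturating sum (OR), psub a symmetric difference (XOR), and pmul an AND.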
template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
// for some weird reasons, it has to be overloaded for packets of integers
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
#ifdef EIGEN_VECTORIZE_FMA
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); }
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); }
#endif
#ifdef EIGEN_VECTORIZE_SSE4_1
template<> EIGEN_DEVICE_FUNC inline Packet4f pselect(const Packet4f& mask, const Packet4f& a, const Packet4f& b) {
  return _mm_blendv_ps(b,a,mask);
}
template<> EIGEN_DEVICE_FUNC inline Packet4i pselect(const Packet4i& mask, const Packet4i& a, const Packet4i& b) {
  return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(b),_mm_castsi128_ps(a),_mm_castsi128_ps(mask)));
}
template<> EIGEN_DEVICE_FUNC inline Packet2d pselect(const Packet2d& mask, const Packet2d& a, const Packet2d& b) { return _mm_blendv_pd(b,a,mask); }
#endif
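// pselect(mask, a, b) picks the lane of a where the mask lane is all ones, and of b
// where it is all zeros; _mm_blendv_ps/pd only inspect the top bit of each lane, which
// is sufficient for such all-or-nothing masks.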
template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // There appears to be a bug in GCC, by which the optimizer may
  // flip the argument order in calls to _mm_min_ps, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  #ifdef EIGEN_VECTORIZE_AVX
  Packet4f res;
  asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  #else
  Packet4f res = b;
  asm("minps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
  #endif
  return res;
#else
  // Arguments are reversed to match NaN propagation behavior of std::min.
  return _mm_min_ps(b, a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // There appears to be a bug in GCC, by which the optimizer may
  // flip the argument order in calls to _mm_min_pd, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  #ifdef EIGEN_VECTORIZE_AVX
  Packet2d res;
  asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  #else
  Packet2d res = b;
  asm("minpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
  #endif
  return res;
#else
  // Arguments are reversed to match NaN propagation behavior of std::min.
  return _mm_min_pd(b, a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_min_epi32(a,b);
#else
  // after some bench, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmplt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}
template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // There appears to be a bug in GCC, by which the optimizer may
  // flip the argument order in calls to _mm_max_ps, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  #ifdef EIGEN_VECTORIZE_AVX
  Packet4f res;
  asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  #else
  Packet4f res = b;
  asm("maxps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
  #endif
  return res;
#else
  // Arguments are reversed to match NaN propagation behavior of std::max.
  return _mm_max_ps(b, a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // There appears to be a bug in GCC, by which the optimizer may
  // flip the argument order in calls to _mm_max_pd, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  #ifdef EIGEN_VECTORIZE_AVX
  Packet2d res;
  asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  #else
  Packet2d res = b;
  asm("maxpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
  #endif
  return res;
#else
  // Arguments are reversed to match NaN propagation behavior of std::max.
  return _mm_max_pd(b, a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_max_epi32(a,b);
#else
  // after some bench, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmpgt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}
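// The SSE2 fallbacks above blend with a comparison mask: (mask & a) | (~mask & b)
// keeps the lane of a wherever the comparison was true, and of b elsewhere.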
template <typename Packet, typename Op>
EIGEN_STRONG_INLINE Packet pminmax_propagate_numbers(const Packet& a, const Packet& b, Op op) {
  // In this implementation, we take advantage of the fact that pmin/pmax for SSE
  // always return a if either a or b is NaN.
  Packet not_nan_mask_a = pcmp_eq(a, a);
  Packet m = op(a, b);
  return pselect<Packet>(not_nan_mask_a, m, b);
}
template <typename Packet, typename Op>
EIGEN_STRONG_INLINE Packet pminmax_propagate_nan(const Packet& a, const Packet& b, Op op) {
  // In this implementation, we take advantage of the fact that pmin/pmax for SSE
  // always return a if either a or b is NaN.
  Packet not_nan_mask_a = pcmp_eq(a, a);
  Packet m = op(b, a);
  return pselect<Packet>(not_nan_mask_a, m, a);
}
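// For example, with a = NaN and b = 1: pminmax_propagate_numbers returns b (1), since
// the NaN lane of a fails pcmp_eq(a, a) and is masked out, whereas
// pminmax_propagate_nan returns a (the NaN), since the failed mask selects a directly.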
// Add specializations for min/max with prescribed NaN propagation.
template<>
EIGEN_STRONG_INLINE Packet4f pmin<PropagateNumbers, Packet4f>(const Packet4f& a, const Packet4f& b) {
  return pminmax_propagate_numbers(a, b, pmin<Packet4f>);
}
template<>
EIGEN_STRONG_INLINE Packet2d pmin<PropagateNumbers, Packet2d>(const Packet2d& a, const Packet2d& b) {
  return pminmax_propagate_numbers(a, b, pmin<Packet2d>);
}
template<>
EIGEN_STRONG_INLINE Packet4f pmax<PropagateNumbers, Packet4f>(const Packet4f& a, const Packet4f& b) {
  return pminmax_propagate_numbers(a, b, pmax<Packet4f>);
}
template<>
EIGEN_STRONG_INLINE Packet2d pmax<PropagateNumbers, Packet2d>(const Packet2d& a, const Packet2d& b) {
  return pminmax_propagate_numbers(a, b, pmax<Packet2d>);
}
template<>
EIGEN_STRONG_INLINE Packet4f pmin<PropagateNaN, Packet4f>(const Packet4f& a, const Packet4f& b) {
  return pminmax_propagate_nan(a, b, pmin<Packet4f>);
}
template<>
EIGEN_STRONG_INLINE Packet2d pmin<PropagateNaN, Packet2d>(const Packet2d& a, const Packet2d& b) {
  return pminmax_propagate_nan(a, b, pmin<Packet2d>);
}
template<>
EIGEN_STRONG_INLINE Packet4f pmax<PropagateNaN, Packet4f>(const Packet4f& a, const Packet4f& b) {
  return pminmax_propagate_nan(a, b, pmax<Packet4f>);
}
template<>
EIGEN_STRONG_INLINE Packet2d pmax<PropagateNaN, Packet2d>(const Packet2d& a, const Packet2d& b) {
  return pminmax_propagate_nan(a, b, pmax<Packet2d>);
}
#if EIGEN_COMP_MSVC
template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD
  #if (EIGEN_COMP_MSVC==1600)
  // NOTE Some versions of MSVC10 generate bad code when using _mm_loadu_ps
  // (i.e., they do not generate an unaligned load!!)
__m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
  res = _mm_loadh_pi(res, (const __m64*)(from+2));
  return res;
  #else
  return _mm_loadu_ps(from);
  #endif
}
#else
// NOTE: with the code below, MSVC's compiler crashes!
template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return _mm_loadu_ps(from);
}
#endif
// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
{
  Packet4f pa = _mm_set_ss(a);
  pstore(to, Packet4f(vec4f_swizzle1(pa,0,0,0,0)));
}
// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
{
  Packet2d pa = _mm_set_sd(a);
  pstore(to, Packet2d(vec2d_swizzle1(pa,0,0)));
}
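// Usage sketch: pstore1<Packet4f>(buf, 1.f) broadcasts the scalar with a single
// swizzle and writes {1,1,1,1} to the (aligned) buffer with one vector store.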
#if EIGEN_COMP_MSVC_STRICT && EIGEN_OS_WIN64
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
// Direct access to the struct members fixed bug #62.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#elif EIGEN_COMP_MSVC_STRICT
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#else
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
#endif
template<> EIGEN_STRONG_INLINE bool   pfirst<Packet16b>(const Packet16b& a) { int x = _mm_cvtsi128_si32(a); return static_cast<bool>(x & 1); }
// We specialize pldexp here, since the generic implementation uses Packet2l, which is not well
// supported by SSE, and has more range than is needed for exponents.
template<> EIGEN_STRONG_INLINE Packet2d pldexp<Packet2d>(const Packet2d& a, const Packet2d& exponent) {
  // Clamp exponent to [-2099, 2099]
  const Packet2d max_exponent = pset1<Packet2d>(2099.0);
  const Packet2d e = pmin(pmax(exponent, pnegate(max_exponent)), max_exponent);

  // Convert e to integer and swizzle to low-order bits.
  const Packet4i ei = vec4i_swizzle1(_mm_cvtpd_epi32(e), 0, 3, 1, 3);

  // Split 2^e into four factors and multiply:
  const Packet4i bias = _mm_set_epi32(0, 1023, 0, 1023);
  Packet4i b = parithmetic_shift_right<2>(ei);                       // floor(e/4)
  Packet2d c = _mm_castsi128_pd(_mm_slli_epi64(padd(b, bias), 52));  // 2^b
  Packet2d out = pmul(pmul(pmul(a, c), c), c);                       // a * 2^(3b)
  b = psub(psub(psub(ei, b), b), b);                                 // e - 3b
  c = _mm_castsi128_pd(_mm_slli_epi64(padd(b, bias), 52));           // 2^(e - 3b)
  out = pmul(out, c);                                                // a * 2^e
  return out;
}
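// Worked example: for exponent = 2099 (the clamp bound), b = 2099>>2 = 524, so the
// factors are 2^524, 2^524, 2^524 and 2^(2099-3*524) = 2^527; each factor is a finite
// double, and their product scales a by the full 2^2099.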
// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., reusing pmul is very slow!)
  // TODO try to call _mm_mul_epu32 directly
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
}
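// In the float reduction above, _mm_movehl_ps(a,a) brings the upper two lanes down so
// one vector multiply yields {a0*a2, a1*a3, ...}; the final _mm_mul_ss then combines
// lanes 0 and 1 into the scalar product.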
// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  Packet4i tmp = _mm_min_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
  return pfirst<Packet4i>(_mm_min_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
#else
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::min after the pstore!!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
  return aux0<aux2 ? aux0 : aux2;
#endif // EIGEN_VECTORIZE_SSE4_1
}
// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  Packet4i tmp = _mm_max_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
  return pfirst<Packet4i>(_mm_max_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
#else
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::max after the pstore!!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
  return aux0>aux2 ? aux0 : aux2;
#endif // EIGEN_VECTORIZE_SSE4_1
}
// Scalar path for pmadd with FMA to ensure consistency with vectorized path.
#ifdef EIGEN_VECTORIZE_FMA
template<> EIGEN_STRONG_INLINE float pmadd(const float& a, const float& b, const float& c) {
  return ::fmaf(a,b,c);
}
template<> EIGEN_STRONG_INLINE double pmadd(const double& a, const double& b, const double& c) {
  return ::fma(a,b,c);
}
#endif
// Packet math for Eigen::half
// Disable the following code since it's broken on too many platforms / compilers.
//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
#if 0
#endif