view src/impl/x86/sse2.c @ 25:92156fe32755
impl/ppc/altivec: update to new implementation
the signed average function is wrong; it should round the result up
when only one of the two inputs is odd, but that doesn't always seem to
hold on altivec, because altivec is weird, and those are exactly the
quirks we need to emulate (a scalar sketch of the expected rounding
follows below). ugh.
also the altivec backend uses the generic functions instead of fallbacks
because it does indeed use the exact same memory structure as the generic
implementation...
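For reference, here is a minimal scalar sketch of the rounding rule described above, assuming the usual "add one before halving" definition of a rounded average; whether AltiVec's signed average behaves like this in every corner case is exactly what the message above is unsure about. The function name is hypothetical and not taken from the repository.

#include <stdint.h>

/* hypothetical scalar model, not part of the repository */
static int32_t avg_rounded(int32_t a, int32_t b)
{
	/* widen to avoid overflow; the +1 rounds the result up (toward
	 * positive infinity) exactly when one of a and b is odd, assuming
	 * an arithmetic right shift on signed values */
	return (int32_t)(((int64_t)a + (int64_t)b + 1) >> 1);
}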
author:   Paper <paper@tflc.us>
date:     Sun, 24 Nov 2024 11:15:59 +0000
parents:  e49e70f7012f
children:
line source
/**
 * vec - a tiny SIMD vector library in C99
 *
 * Copyright (c) 2024 Paper
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
**/

#include "vec/impl/x86/sse2.h"
#include "vec/impl/generic.h"

#include <emmintrin.h>

#define VEC_SSE2_OPERATION_8x16(op, sign) \
	do { \
		/* unpack and multiply */ \
		union v##sign##int8x16_impl_data *vec1d = (union v##sign##int8x16_impl_data *)&vec1; \
		union v##sign##int8x16_impl_data *vec2d = (union v##sign##int8x16_impl_data *)&vec2; \
	\
		__m128i dst_even = _mm_##op##_epi16(vec1d->sse, vec2d->sse); \
		__m128i dst_odd = _mm_##op##_epi16(_mm_srli_epi16(vec1d->sse, 8), _mm_srli_epi16(vec2d->sse, 8)); \
	\
		/* repack */ \
		vec1d->sse = _mm_or_si128( \
			_mm_slli_epi16(dst_odd, 8), \
			_mm_srli_epi16(_mm_slli_epi16(dst_even, 8), 8) \
		); \
		return vec1d->vec; \
	} while (0)

// shared between SSE2 variations
#define VEC_SSE2_MUL_8x16(sign) \
	VEC_SSE2_OPERATION_8x16(mullo, sign)

#define VEC_SSE2_MUL_16x8(sign) \
	do { \
		/* we have a real instruction for this */ \
		union v##sign##int16x8_impl_data *vec1d = (union v##sign##int16x8_impl_data *)&vec1; \
		union v##sign##int16x8_impl_data *vec2d = (union v##sign##int16x8_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_mullo_epi16(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} while (0)
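/*
 * Aside (not part of the original file): a scalar sketch of why the even/odd
 * split in VEC_SSE2_OPERATION_8x16 above gives correct 8-bit products.  The
 * low 8 bits of a 16-bit product depend only on the low 8 bits of each
 * factor, so doing mullo in 16-bit lanes and masking back down to 8 bits
 * yields the same wrapped result a native 8-bit multiply would.  The
 * function name is hypothetical.
 */
#if 0
static unsigned char vec_mul8_scalar_model(unsigned char a, unsigned char b)
{
	unsigned int wide = (unsigned int)a * (unsigned int)b; /* stand-in for the 16-bit lane op */
	return (unsigned char)(wide & 0xFF); /* "repack": keep only the low byte */
}
#endif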
#define VEC_SSE2_MUL_32x4(sign) \
	do { \
		/* this was stolen from... somewhere :) */ \
		union v##sign##int32x4_impl_data *vec1d = (union v##sign##int32x4_impl_data *)&vec1; \
		union v##sign##int32x4_impl_data *vec2d = (union v##sign##int32x4_impl_data *)&vec2; \
	\
		__m128i a13 = _mm_shuffle_epi32(vec1d->sse, 0xF5); /* (-,a3,-,a1) */ \
		__m128i b13 = _mm_shuffle_epi32(vec2d->sse, 0xF5); /* (-,b3,-,b1) */ \
		__m128i prod02 = _mm_mul_epu32(vec1d->sse, vec2d->sse); /* (-,a2*b2,-,a0*b0) */ \
		__m128i prod13 = _mm_mul_epu32(a13, b13); /* (-,a3*b3,-,a1*b1) */ \
		__m128i prod01 = _mm_unpacklo_epi32(prod02, prod13); /* (-,-,a1*b1,a0*b0) */ \
		__m128i prod23 = _mm_unpackhi_epi32(prod02, prod13); /* (-,-,a3*b3,a2*b2) */ \
	\
		vec1d->sse = _mm_unpacklo_epi64(prod01, prod23); /* (ab3,ab2,ab1,ab0) */ \
		return vec1d->vec; \
	} while (0)

#define VEC_SSE2_MUL_64x2(sign) \
	do { \
		union v##sign##int64x2_impl_data *vec1d = (union v##sign##int64x2_impl_data *)&vec1; \
		union v##sign##int64x2_impl_data *vec2d = (union v##sign##int64x2_impl_data *)&vec2; \
	\
		__m128i ac = _mm_mul_epu32(vec1d->sse, vec2d->sse); /* ac = (vec1 & UINT32_MAX) * (vec2 & UINT32_MAX); */ \
		__m128i b = _mm_srli_epi64(vec1d->sse, 32); /* b = vec1 >> 32; */ \
		__m128i bc = _mm_mul_epu32(b, vec2d->sse); /* bc = b * (vec2 & UINT32_MAX); */ \
		__m128i d = _mm_srli_epi64(vec2d->sse, 32); /* d = vec2 >> 32; */ \
		__m128i ad = _mm_mul_epu32(vec1d->sse, d); /* ad = (vec1 & UINT32_MAX) * d; */ \
		__m128i hi = _mm_add_epi64(bc, ad); /* hi = bc + ad; */ \
		hi = _mm_slli_epi64(hi, 32); /* hi <<= 32; */ \
	\
		vec1d->sse = _mm_add_epi64(hi, ac); /* low 64 bits of each product */ \
		return vec1d->vec; \
	} while (0)

#define VEC_SSE2_CMPEQ_8x16(sign) \
	do { \
		union v##sign##int8x16_impl_data *vec1d = (union v##sign##int8x16_impl_data *)&vec1; \
		union v##sign##int8x16_impl_data *vec2d = (union v##sign##int8x16_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_cmpeq_epi8(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} while (0)

#define VEC_SSE2_CMPEQ_16x8(sign) \
	do { \
		union v##sign##int16x8_impl_data *vec1d = (union v##sign##int16x8_impl_data *)&vec1; \
		union v##sign##int16x8_impl_data *vec2d = (union v##sign##int16x8_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_cmpeq_epi16(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} while (0)

#define VEC_SSE2_CMPEQ_32x4(sign) \
	do { \
		union v##sign##int32x4_impl_data *vec1d = (union v##sign##int32x4_impl_data *)&vec1; \
		union v##sign##int32x4_impl_data *vec2d = (union v##sign##int32x4_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_cmpeq_epi32(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} while (0)
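/*
 * Aside (not part of the original file): a scalar model of the decomposition
 * used by VEC_SSE2_MUL_64x2 above.  SSE2 only offers a 32x32->64 multiply
 * (_mm_mul_epu32), so the low 64 bits of each product are rebuilt from
 * partial products; the a_hi*b_hi term only affects bits above 64 and is
 * dropped.  The function name is hypothetical.
 */
#if 0
static unsigned long long vec_mul64_lo_scalar_model(unsigned long long a, unsigned long long b)
{
	unsigned long long a_lo = a & 0xFFFFFFFFULL, a_hi = a >> 32;
	unsigned long long b_lo = b & 0xFFFFFFFFULL, b_hi = b >> 32;

	unsigned long long ac = a_lo * b_lo; /* low  x low  */
	unsigned long long bc = a_hi * b_lo; /* high x low  */
	unsigned long long ad = a_lo * b_hi; /* low  x high */

	return ac + ((bc + ad) << 32); /* low 64 bits of the full product */
}
#endif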
// SSE2 doesn't have an intrinsic for 64x2 equality comparison,
// so how can we take a 32x4 comparison result and turn it into
// a 64x2 comparison result?
//
// well, Intel conveniently provided an operation where we can
// shuffle around 32-bit integers (_mm_shuffle_epi32).
//
// this means all we have to do is simply do the 32-bit operation,
// shuffle the parts so each half lines up with the other half of
// the same 64-bit element, and then return a bitwise AND of the result.
#define VEC_SSE2_CMPEQ_64x2(sign) \
	do { \
		union v##sign##int64x2_impl_data *vec1d = (union v##sign##int64x2_impl_data *)&vec1; \
		union v##sign##int64x2_impl_data *vec2d = (union v##sign##int64x2_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_cmpeq_epi32(vec1d->sse, vec2d->sse); \
		vec2d->sse = _mm_shuffle_epi32(vec1d->sse, _MM_SHUFFLE(3, 3, 1, 1)); \
		vec1d->sse = _mm_shuffle_epi32(vec1d->sse, _MM_SHUFFLE(2, 2, 0, 0)); \
		vec1d->sse = _mm_and_si128(vec1d->sse, vec2d->sse); \
	\
		return vec1d->vec; \
	} while (0)
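/*
 * Aside (not part of the original file): what VEC_SSE2_CMPEQ_64x2 above
 * computes, written out for a single 64-bit lane.  An element compares
 * equal only if both of its 32-bit halves do, which is what ANDing the
 * shuffled 32-bit comparison masks produces.  The function name is
 * hypothetical.
 */
#if 0
static unsigned long long vec_cmpeq64_scalar_model(unsigned long long a, unsigned long long b)
{
	int lo_eq = (a & 0xFFFFFFFFULL) == (b & 0xFFFFFFFFULL); /* mask for the low dword  */
	int hi_eq = (a >> 32) == (b >> 32);                     /* mask for the high dword */

	/* all-ones only when both halves match */
	return (lo_eq && hi_eq) ? 0xFFFFFFFFFFFFFFFFULL : 0;
}
#endif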
#define VEC_SSE2_DEFINE_OPERATIONS_SIGN(sign, bits, size) \
	union v##sign##int##bits##x##size##_impl_data { \
		v##sign##int##bits##x##size vec; \
		__m128i sse; \
	}; \
	\
	VEC_STATIC_ASSERT(VEC_ALIGNOF(__m128i) <= VEC_ALIGNOF(v##sign##int##bits##x##size), "vec: v" #sign "int" #bits "x" #size " alignment needs to be expanded to fit intrinsic type size"); \
	VEC_STATIC_ASSERT(sizeof(__m128i) <= sizeof(v##sign##int##bits##x##size), "vec: v" #sign "int" #bits "x" #size " needs to be expanded to fit intrinsic type size"); \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_load_aligned(const vec_##sign##int##bits in[size]) \
	{ \
		union v##sign##int##bits##x##size##_impl_data vec; \
		vec.sse = _mm_load_si128((const __m128i *)in); \
		return vec.vec; \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_load(const vec_##sign##int##bits in[size]) \
	{ \
		union v##sign##int##bits##x##size##_impl_data vec; \
		vec.sse = _mm_loadu_si128((const __m128i *)in); \
		return vec.vec; \
	} \
	\
	void v##sign##int##bits##x##size##_sse2_store_aligned(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
	{ \
		_mm_store_si128((__m128i *)out, ((union v##sign##int##bits##x##size##_impl_data *)&vec)->sse); \
	} \
	\
	void v##sign##int##bits##x##size##_sse2_store(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
	{ \
		_mm_storeu_si128((__m128i *)out, ((union v##sign##int##bits##x##size##_impl_data *)&vec)->sse); \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_add(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_add_epi##bits(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_sub(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_sub_epi##bits(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_mul(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		VEC_SSE2_MUL_##bits##x##size(sign); \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_and(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_and_si128(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_or(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_or_si128(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_xor(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_xor_si128(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_cmpeq(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		VEC_SSE2_CMPEQ_##bits##x##size(sign); \
	} \
	\
	const v##sign##int##bits##x##size##_impl v##sign##int##bits##x##size##_impl_sse2 = { \
		v##sign##int##bits##x##size##_generic_splat, \
		v##sign##int##bits##x##size##_sse2_load_aligned, \
		v##sign##int##bits##x##size##_sse2_load, \
		v##sign##int##bits##x##size##_sse2_store_aligned, \
		v##sign##int##bits##x##size##_sse2_store, \
		v##sign##int##bits##x##size##_sse2_add, \
		v##sign##int##bits##x##size##_sse2_sub, \
		v##sign##int##bits##x##size##_sse2_mul, \
		v##sign##int##bits##x##size##_generic_div, \
		v##sign##int##bits##x##size##_generic_avg, \
		v##sign##int##bits##x##size##_sse2_and, \
		v##sign##int##bits##x##size##_sse2_or, \
		v##sign##int##bits##x##size##_sse2_xor, \
		v##sign##int##bits##x##size##_generic_not, \
		v##sign##int##bits##x##size##_generic_lshift, \
		v##sign##int##bits##x##size##_generic_rshift, \
		v##sign##int##bits##x##size##_generic_lrshift, \
		v##sign##int##bits##x##size##_generic_cmplt, \
		v##sign##int##bits##x##size##_generic_cmple, \
		v##sign##int##bits##x##size##_sse2_cmpeq, \
		v##sign##int##bits##x##size##_generic_cmpge, \
		v##sign##int##bits##x##size##_generic_cmpgt, \
	};

#define VEC_SSE2_DEFINE_OPERATIONS(bits, size) \
	VEC_SSE2_DEFINE_OPERATIONS_SIGN(u, bits, size) \
	VEC_SSE2_DEFINE_OPERATIONS_SIGN( , bits, size)

// SSE is *only* 128-bit
VEC_SSE2_DEFINE_OPERATIONS(8, 16)
VEC_SSE2_DEFINE_OPERATIONS(16, 8)
VEC_SSE2_DEFINE_OPERATIONS(32, 4)
VEC_SSE2_DEFINE_OPERATIONS(64, 2)
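/*
 * Aside (not part of the original file): a rough usage sketch of what one
 * expansion of the macros above provides, using the signed 32x4 case.  The
 * function names follow the token-pasting pattern in
 * VEC_SSE2_DEFINE_OPERATIONS_SIGN; the vint32x4 and vec_int32 types are
 * assumed to come from the vec headers included at the top of this file.
 */
#if 0
static void example(void)
{
	vec_int32 a[4] = { 1, 2, 3, 4 };
	vec_int32 b[4] = { 5, 6, 7, 8 };
	vec_int32 out[4];

	vint32x4 v1 = vint32x4_sse2_load(a); /* unaligned load */
	vint32x4 v2 = vint32x4_sse2_load(b);
	vint32x4 v3 = vint32x4_sse2_add(v1, v2);

	vint32x4_sse2_store(v3, out); /* out = { 6, 8, 10, 12 } */
}
#endif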