Mercurial > vec
comparison src/impl/x86/sse41.c @ 28:c6c99ab1088a
*: add min/max functions and a big big refactor (again)
agh, this time I added a few more implementations (and generally
made the code just a little faster...)
| author | Paper <paper@tflc.us> |
|---|---|
| date | Thu, 24 Apr 2025 00:54:02 -0400 |
| parents | e49e70f7012f |
| children | bf6ad516f1e6 |
comparison legend: equal | deleted | inserted | replaced
| 27:d00b95f95dd1 | 28:c6c99ab1088a |
|---|---|
| 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 22 * SOFTWARE. | 22 * SOFTWARE. |
| 23 **/ | 23 **/ |
| 24 | 24 |
| 25 #include "vec/impl/x86/sse41.h" | 25 #include "vec/impl/x86/sse41.h" |
| 26 #include "vec/impl/x86/sse2.h" | |
| 27 #include "vec/impl/generic.h" | 26 #include "vec/impl/generic.h" |
| 28 | 27 |
| 29 #include <immintrin.h> | 28 #include <immintrin.h> |
| 30 | 29 |
| 31 // SSE 4.1 provides a real _mm_mullo_epi32 | 30 /* ------------------------------------------------------------------------ */ |
| 32 #define VEC_SSE41_DEFINE_OPERATIONS(sign) \ | 31 |
| 33 union v##sign##int32x4_impl_data { \ | 32 #define VEC_SSE41_MINMAX_TEMPLATE(SIGN, BITS, SIZE, INTLSIGN, OP) \ |
| 34 v##sign##int32x4 vec; \ | 33 VEC_FUNC_IMPL v##SIGN##int##BITS##x##SIZE v##SIGN##int##BITS##x##SIZE##_sse41_##OP(v##SIGN##int##BITS##x##SIZE vec1, v##SIGN##int##BITS##x##SIZE vec2) \ |
| 35 __m128i sse; \ | 34 { \ |
| 36 }; \ | 35 union v##SIGN##int##BITS##x##SIZE##_impl_data *vec1d = (union v##SIGN##int##BITS##x##SIZE##_impl_data *)&vec1; \ |
| 36 union v##SIGN##int##BITS##x##SIZE##_impl_data *vec2d = (union v##SIGN##int##BITS##x##SIZE##_impl_data *)&vec2; \ | |
| 37 \ | 37 \ |
| 38 VEC_STATIC_ASSERT(VEC_ALIGNOF(__m128i) <= VEC_ALIGNOF(v##sign##int32x4), "vec: v" #sign "int32x4 alignment needs to be expanded to fit intrinsic type size"); \ | 38 vec1d->sse = _mm_##OP##_ep##INTLSIGN##BITS(vec1d->sse, vec2d->sse); \ |
| 39 VEC_STATIC_ASSERT(sizeof(__m128i) <= sizeof(v##sign##int32x4), "vec: v" #sign "int32x4 needs to be expanded to fit intrinsic type size"); \ | |
| 40 \ | 39 \ |
| 41 static v##sign##int32x4 v##sign##int32x4_sse41_mul(v##sign##int32x4 vec1, v##sign##int32x4 vec2) \ | 40 return vec1d->vec; \ |
| 41 } | |
| 42 | |
| 43 #define VEC_SSE41_MINMAX_8x16(OP) VEC_SSE41_MINMAX_TEMPLATE( , 8, 16, i, OP) | |
| 44 #define VEC_SSE41_MINMAX_u8x16(OP) VEC_SSE41_MINMAX_TEMPLATE(u, 8, 16, u, OP) | |
| 45 #define VEC_SSE41_MINMAX_16x8(OP) VEC_SSE41_MINMAX_TEMPLATE( , 16, 8, i, OP) | |
| 46 #define VEC_SSE41_MINMAX_u16x8(OP) VEC_SSE41_MINMAX_TEMPLATE(u, 16, 8, u, OP) | |
| 47 #define VEC_SSE41_MINMAX_32x4(OP) VEC_SSE41_MINMAX_TEMPLATE( , 32, 4, i, OP) | |
| 48 #define VEC_SSE41_MINMAX_u32x4(OP) VEC_SSE41_MINMAX_TEMPLATE(u, 32, 4, u, OP) | |
| 49 #define VEC_SSE41_MINMAX_64x2(OP) /* nothing */ | |
| 50 #define VEC_SSE41_MINMAX_u64x2(OP) /* nothing */ | |
| 51 | |
| 52 #define VEC_SSE41_STRUCT_MINMAX_8x16(OP, SIGN) v##SIGN##int8x16_sse41_##OP | |
| 53 #define VEC_SSE41_STRUCT_MINMAX_16x8(OP, SIGN) v##SIGN##int16x8_sse41_##OP | |
| 54 #define VEC_SSE41_STRUCT_MINMAX_32x4(OP, SIGN) v##SIGN##int32x4_sse41_##OP | |
| 55 #define VEC_SSE41_STRUCT_MINMAX_64x2(OP, SIGN) NULL | |
| 56 | |
| 57 /* ------------------------------------------------------------------------ */ | |
| 58 /* multiplication */ | |
| 59 | |
| 60 #define VEC_SSE41_MUL_8x16(sign) | |
| 61 #define VEC_SSE41_MUL_16x8(sign) | |
| 62 #define VEC_SSE41_MUL_32x4(sign) \ | |
| 63 VEC_FUNC_IMPL v##sign##int32x4 v##sign##int32x4_sse41_mul(v##sign##int32x4 vec1, v##sign##int32x4 vec2) \ | |
| 42 { \ | 64 { \ |
| 43 union v##sign##int32x4_impl_data *vec1d = (union v##sign##int32x4_impl_data *)&vec1; \ | 65 union v##sign##int32x4_impl_data *vec1d = (union v##sign##int32x4_impl_data *)&vec1; \ |
| 44 union v##sign##int32x4_impl_data *vec2d = (union v##sign##int32x4_impl_data *)&vec2; \ | 66 union v##sign##int32x4_impl_data *vec2d = (union v##sign##int32x4_impl_data *)&vec2; \ |
| 45 \ | 67 \ |
| 46 vec1d->sse = _mm_mullo_epi32(vec1d->sse, vec2d->sse); \ | 68 vec1d->sse = _mm_mullo_epi32(vec1d->sse, vec2d->sse); \ |
| 47 return vec1d->vec; \ | 69 return vec1d->vec; \ |
| 48 } \ | 70 } |
| 71 #define VEC_SSE41_MUL_64x2(sign) | |
| 72 | |
| 73 #define VEC_SSE41_STRUCT_MUL_8x16(SIGN) NULL | |
| 74 #define VEC_SSE41_STRUCT_MUL_16x8(SIGN) NULL | |
| 75 #define VEC_SSE41_STRUCT_MUL_32x4(SIGN) v##SIGN##int32x4_sse41_mul | |
| 76 #define VEC_SSE41_STRUCT_MUL_64x2(SIGN) NULL | |
| 77 | |
| 78 /* ------------------------------------------------------------------------ */ | |
| 79 /* comparison */ | |
| 80 | |
| 81 #define MM_SET1_64(x) _mm_set1_epi64x(x) | |
| 82 | |
| 83 /* helper funcs */ | |
| 84 #define VEC_xSSE41_CMP(name, op, sign, bits, size, first, second, VARS, TRANS1, TRANS2) \ | |
| 85 VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse41_##name(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \ | |
| 86 { \ | |
| 87 union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \ | |
| 88 union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \ | |
| 89 VARS \ | |
| 49 \ | 90 \ |
| 50 const v##sign##int32x4_impl v##sign##int32x4_impl_sse41 = { \ | 91 TRANS1 \ |
| 51 v##sign##int32x4_generic_splat, \ | 92 \ |
| 52 v##sign##int32x4_sse2_load_aligned, \ | 93 vec1d->sse = _mm_##op##_epi##bits(vec##first##d->sse, vec##second##d->sse); \ |
| 53 v##sign##int32x4_sse2_load, \ | 94 \ |
| 54 v##sign##int32x4_sse2_store_aligned, \ | 95 TRANS2 \ |
| 55 v##sign##int32x4_sse2_store, \ | 96 \ |
| 56 v##sign##int32x4_sse2_add, \ | 97 return vec1d->vec; \ |
| 57 v##sign##int32x4_sse2_sub, \ | 98 } |
| 58 v##sign##int32x4_sse41_mul, \ | 99 |
| 59 v##sign##int32x4_generic_div, \ | 100 #define VEC_SSE41_CMP(name, op, bits, size, first, second) \ |
| 60 v##sign##int32x4_generic_avg, \ | 101 VEC_xSSE41_CMP(name, op, /* nothing */, bits, size, first, second, /* nothing */, /* nothing */, /* nothing */) |
| 61 v##sign##int32x4_sse2_and, \ | 102 |
| 62 v##sign##int32x4_sse2_or, \ | 103 #define VEC_uSSE41_CMP(name, op, bits, size, first, second) \ |
| 63 v##sign##int32x4_sse2_xor, \ | 104 VEC_xSSE41_CMP(name, op, u, bits, size, first, second, \ |
| 64 v##sign##int32x4_generic_not, \ | 105 __m128i xor_val = MM_SET1_##bits(UINT64_C(1) << (bits - 1)); \ |
| 65 v##sign##int32x4_generic_lshift, \ | 106 , { \ |
| 66 v##sign##int32x4_generic_rshift, \ | 107 vec1d->sse = _mm_xor_si128(vec1d->sse, xor_val); \ |
| 67 v##sign##int32x4_generic_lrshift, \ | 108 vec2d->sse = _mm_xor_si128(vec2d->sse, xor_val); \ |
| 68 v##sign##int32x4_generic_cmplt, \ | 109 }, \ |
| 69 v##sign##int32x4_generic_cmple, \ | 110 { \ |
| 70 v##sign##int32x4_sse2_cmpeq, \ | 111 /* nothing */ \ |
| 71 v##sign##int32x4_generic_cmpge, \ | 112 }) |
| 72 v##sign##int32x4_generic_cmpgt, \ | 113 |
| 114 /* these are the same for unsigned and signed, for obvious reasons. */ | |
| 115 #define VEC_SSE41_CMPEQ_8x16(sign) /* nothing */ | |
| 116 #define VEC_SSE41_CMPEQ_16x8(sign) /* nothing */ | |
| 117 #define VEC_SSE41_CMPEQ_32x4(sign) /* nothing */ | |
| 118 #define VEC_SSE41_CMPEQ_64x2(sign) VEC_xSSE41_CMP(cmpeq, cmpeq, sign, 64, 2, 1, 2, , ,) | |
| 119 | |
| 120 /* ------------------------------------------------------------------------ */ | |
| 121 | |
| 122 #define VEC_SSE41_STRUCT_CMP_8x16(name, sign) NULL | |
| 123 #define VEC_SSE41_STRUCT_CMP_16x8(name, sign) NULL | |
| 124 #define VEC_SSE41_STRUCT_CMP_32x4(name, sign) NULL | |
| 125 #define VEC_SSE41_STRUCT_CMP_64x2(name, sign) v##sign##int64x2_sse41_##name | |
| 126 | |
| 127 /* ------------------------------------------------------------------------ */ | |
| 128 | |
| 129 // SSE 4.1 provides a real _mm_mullo_epi32 | |
| 130 #define VEC_SSE41_DEFINE_OPERATIONS_SIGN(sign, bits, size) \ | |
| 131 union v##sign##int##bits##x##size##_impl_data { \ | |
| 132 v##sign##int##bits##x##size vec; \ | |
| 133 __m128i sse; \ | |
| 134 }; \ | |
| 135 \ | |
| 136 VEC_STATIC_ASSERT(VEC_ALIGNOF(__m128i) <= VEC_ALIGNOF(v##sign##int##bits##x##size), "vec: v" #sign "int32x4 alignment needs to be expanded to fit intrinsic type size"); \ | |
| 137 VEC_STATIC_ASSERT(sizeof(__m128i) <= sizeof(v##sign##int##bits##x##size), "vec: v" #sign "int32x4 needs to be expanded to fit intrinsic type size"); \ | |
| 138 \ | |
| 139 VEC_SSE41_MUL_##bits##x##size(sign) \ | |
| 140 \ | |
| 141 VEC_SSE41_MINMAX_##sign##bits##x##size(min) \ | |
| 142 VEC_SSE41_MINMAX_##sign##bits##x##size(max) \ | |
| 143 \ | |
| 144 VEC_SSE41_CMPEQ_##bits##x##size(sign); \ | |
| 145 \ | |
| 146 const v##sign##int##bits##x##size##_impl v##sign##int##bits##x##size##_impl_sse41 = { \ | |
| 147 .mul = VEC_SSE41_STRUCT_MUL_##bits##x##size(sign), \ | |
| 148 .min = VEC_SSE41_STRUCT_MINMAX_##bits##x##size(min, sign), \ | |
| 149 .max = VEC_SSE41_STRUCT_MINMAX_##bits##x##size(max, sign), \ | |
| 150 .cmpeq = VEC_SSE41_STRUCT_CMP_##bits##x##size(cmpeq, sign), \ | |
| 73 }; | 151 }; |
| 74 | 152 |
| 75 VEC_SSE41_DEFINE_OPERATIONS() | 153 #define VEC_SSE41_DEFINE_OPERATIONS(bits, size) \ |
| 76 VEC_SSE41_DEFINE_OPERATIONS(u) | 154 VEC_SSE41_DEFINE_OPERATIONS_SIGN(u, bits, size) \ |
| 155 VEC_SSE41_DEFINE_OPERATIONS_SIGN( , bits, size) | |
| 156 | |
| 157 VEC_SSE41_DEFINE_OPERATIONS(8, 16) | |
| 158 VEC_SSE41_DEFINE_OPERATIONS(16, 8) | |
| 159 VEC_SSE41_DEFINE_OPERATIONS(32, 4) | |
| 160 VEC_SSE41_DEFINE_OPERATIONS(64, 2) |
