comparison src/impl/x86/sse41.c @ 31:bf6ad516f1e6

Backed out changeset c6c99ab1088a
author Paper <paper@tflc.us>
date Fri, 25 Apr 2025 17:40:33 -0400
parents c6c99ab1088a
children 99e4539f922f
comparing 30:641d8c79b1da with 31:bf6ad516f1e6
@@ -21,9 +21,10 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  **/
 
 #include "vec/impl/x86/sse41.h"
+#include "vec/impl/x86/sse2.h"
 #include "vec/impl/generic.h"
 
 #include <immintrin.h>
 
@@ -30,41 +31,18 @@
-/* ------------------------------------------------------------------------ */
-
-#define VEC_SSE41_MINMAX_TEMPLATE(SIGN, BITS, SIZE, INTLSIGN, OP) \
-	VEC_FUNC_IMPL v##SIGN##int##BITS##x##SIZE v##SIGN##int##BITS##x##SIZE##_sse41_##OP(v##SIGN##int##BITS##x##SIZE vec1, v##SIGN##int##BITS##x##SIZE vec2) \
-	{ \
-		union v##SIGN##int##BITS##x##SIZE##_impl_data *vec1d = (union v##SIGN##int##BITS##x##SIZE##_impl_data *)&vec1; \
-		union v##SIGN##int##BITS##x##SIZE##_impl_data *vec2d = (union v##SIGN##int##BITS##x##SIZE##_impl_data *)&vec2; \
-	\
-		vec1d->sse = _mm_##OP##_ep##INTLSIGN##BITS(vec1d->sse, vec2d->sse); \
-	\
-		return vec1d->vec; \
-	}
-
-#define VEC_SSE41_MINMAX_8x16(OP) VEC_SSE41_MINMAX_TEMPLATE( , 8, 16, i, OP)
-#define VEC_SSE41_MINMAX_u8x16(OP) VEC_SSE41_MINMAX_TEMPLATE(u, 8, 16, u, OP)
-#define VEC_SSE41_MINMAX_16x8(OP) VEC_SSE41_MINMAX_TEMPLATE( , 16, 8, i, OP)
-#define VEC_SSE41_MINMAX_u16x8(OP) VEC_SSE41_MINMAX_TEMPLATE(u, 16, 8, u, OP)
-#define VEC_SSE41_MINMAX_32x4(OP) VEC_SSE41_MINMAX_TEMPLATE( , 32, 4, i, OP)
-#define VEC_SSE41_MINMAX_u32x4(OP) VEC_SSE41_MINMAX_TEMPLATE(u, 32, 4, u, OP)
-#define VEC_SSE41_MINMAX_64x2(OP) /* nothing */
-#define VEC_SSE41_MINMAX_u64x2(OP) /* nothing */
-
-#define VEC_SSE41_STRUCT_MINMAX_8x16(OP, SIGN) v##SIGN##int8x16_sse41_##OP
-#define VEC_SSE41_STRUCT_MINMAX_16x8(OP, SIGN) v##SIGN##int16x8_sse41_##OP
-#define VEC_SSE41_STRUCT_MINMAX_32x4(OP, SIGN) v##SIGN##int32x4_sse41_##OP
-#define VEC_SSE41_STRUCT_MINMAX_64x2(OP, SIGN) NULL
-
-/* ------------------------------------------------------------------------ */
-/* multiplication */
-
-#define VEC_SSE41_MUL_8x16(sign)
-#define VEC_SSE41_MUL_16x8(sign)
-#define VEC_SSE41_MUL_32x4(sign) \
-	VEC_FUNC_IMPL v##sign##int32x4 v##sign##int32x4_sse41_mul(v##sign##int32x4 vec1, v##sign##int32x4 vec2) \
+// SSE 4.1 provides a real _mm_mullo_epi32
+#define VEC_SSE41_DEFINE_OPERATIONS(sign) \
+	union v##sign##int32x4_impl_data { \
+		v##sign##int32x4 vec; \
+		__m128i sse; \
+	}; \
+	\
+	VEC_STATIC_ASSERT(VEC_ALIGNOF(__m128i) <= VEC_ALIGNOF(v##sign##int32x4), "vec: v" #sign "int32x4 alignment needs to be expanded to fit intrinsic type size"); \
+	VEC_STATIC_ASSERT(sizeof(__m128i) <= sizeof(v##sign##int32x4), "vec: v" #sign "int32x4 needs to be expanded to fit intrinsic type size"); \
+	\
+	static v##sign##int32x4 v##sign##int32x4_sse41_mul(v##sign##int32x4 vec1, v##sign##int32x4 vec2) \
 	{ \
 		union v##sign##int32x4_impl_data *vec1d = (union v##sign##int32x4_impl_data *)&vec1; \
 		union v##sign##int32x4_impl_data *vec2d = (union v##sign##int32x4_impl_data *)&vec2; \
 	\
 		vec1d->sse = _mm_mullo_epi32(vec1d->sse, vec2d->sse); \
 		return vec1d->vec; \
-	}
+	} \
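The restored comment "SSE 4.1 provides a real _mm_mullo_epi32" is the whole reason this backend exists: plain SSE2 has no four-lane 32-bit low multiply, so it must be stitched together from _mm_mul_epu32, which only multiplies the even lanes to 64-bit results. A minimal sketch of that SSE2-era emulation (illustrative, not code from this repository) shows what the single SSE 4.1 instruction replaces:

#include <immintrin.h>

/* SSE2 fallback for the operation _mm_mullo_epi32 performs directly */
static __m128i mullo_epi32_sse2(__m128i a, __m128i b)
{
	/* 64-bit products of lanes 0 and 2 */
	__m128i even = _mm_mul_epu32(a, b);
	/* 64-bit products of lanes 1 and 3, shifted down into even positions */
	__m128i odd  = _mm_mul_epu32(_mm_srli_epi64(a, 32),
	                             _mm_srli_epi64(b, 32));
	/* keep the low 32 bits of each product and re-interleave the lanes */
	even = _mm_shuffle_epi32(even, _MM_SHUFFLE(0, 0, 2, 0));
	odd  = _mm_shuffle_epi32(odd,  _MM_SHUFFLE(0, 0, 2, 0));
	return _mm_unpacklo_epi32(even, odd);
}

With SSE 4.1 the whole body collapses to _mm_mullo_epi32(a, b), which is exactly what the mul function above does.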
@@ -71,43 +49,24 @@
-#define VEC_SSE41_MUL_64x2(sign)
-
-#define VEC_SSE41_STRUCT_MUL_8x16(SIGN) NULL
-#define VEC_SSE41_STRUCT_MUL_16x8(SIGN) NULL
-#define VEC_SSE41_STRUCT_MUL_32x4(SIGN) v##SIGN##int32x4_sse41_mul
-#define VEC_SSE41_STRUCT_MUL_64x2(SIGN) NULL
-
-/* ------------------------------------------------------------------------ */
-/* comparison */
-
-#define MM_SET1_64(x) _mm_set1_epi64x(x)
-
-/* helper funcs */
-#define VEC_xSSE41_CMP(name, op, sign, bits, size, first, second, VARS, TRANS1, TRANS2) \
-	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse41_##name(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
-	{ \
-		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
-		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
-		VARS \
-	\
-		TRANS1 \
-	\
-		vec1d->sse = _mm_##op##_epi##bits(vec##first##d->sse, vec##second##d->sse); \
-	\
-		TRANS2 \
-	\
-		return vec1d->vec; \
-	}
-
-#define VEC_SSE41_CMP(name, op, bits, size, first, second) \
-	VEC_xSSE41_CMP(name, op, /* nothing */, bits, size, first, second, /* nothing */, /* nothing */, /* nothing */)
-
-#define VEC_uSSE41_CMP(name, op, bits, size, first, second) \
-	VEC_xSSE41_CMP(name, op, u, bits, size, first, second, \
-		__m128i xor_val = MM_SET1_##bits(UINT64_C(1) << (bits - 1)); \
-	, { \
-		vec1d->sse = _mm_xor_si128(vec1d->sse, xor_val); \
-		vec2d->sse = _mm_xor_si128(vec2d->sse, xor_val); \
-	}, \
-	{ \
-		/* nothing */ \
-	})
-
+	\
+	const v##sign##int32x4_impl v##sign##int32x4_impl_sse41 = { \
+		v##sign##int32x4_generic_splat, \
+		v##sign##int32x4_sse2_load_aligned, \
+		v##sign##int32x4_sse2_load, \
+		v##sign##int32x4_sse2_store_aligned, \
+		v##sign##int32x4_sse2_store, \
+		v##sign##int32x4_sse2_add, \
+		v##sign##int32x4_sse2_sub, \
+		v##sign##int32x4_sse41_mul, \
+		v##sign##int32x4_generic_div, \
+		v##sign##int32x4_generic_avg, \
+		v##sign##int32x4_sse2_and, \
+		v##sign##int32x4_sse2_or, \
+		v##sign##int32x4_sse2_xor, \
+		v##sign##int32x4_generic_not, \
+		v##sign##int32x4_generic_lshift, \
+		v##sign##int32x4_generic_rshift, \
+		v##sign##int32x4_generic_lrshift, \
+		v##sign##int32x4_generic_cmplt, \
+		v##sign##int32x4_generic_cmple, \
+		v##sign##int32x4_sse2_cmpeq, \
+		v##sign##int32x4_generic_cmpge, \
+		v##sign##int32x4_generic_cmpgt, \
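The deleted VEC_uSSE41_CMP helper is worth a note, because it encodes a standard x86 trick: the ISA has no unsigned integer compares, but XORing both operands with the sign bit, UINT64_C(1) << (bits - 1), biases them so that a signed compare yields the unsigned ordering. A hedged standalone sketch of the same idea for 32-bit lanes (illustrative names, SSE2 intrinsics, not part of this library):

#include <immintrin.h>

/* unsigned 32-bit greater-than via the sign-flip trick */
static __m128i cmpgt_epu32(__m128i a, __m128i b)
{
	const __m128i bias = _mm_set1_epi32((int)0x80000000);
	a = _mm_xor_si128(a, bias); /* maps 0..UINT32_MAX onto INT32_MIN..INT32_MAX */
	b = _mm_xor_si128(b, bias);
	return _mm_cmpgt_epi32(a, b); /* signed compare now gives the unsigned order */
}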
@@ -114,47 +73,4 @@
-/* these are the same for unsigned and signed, for obvious reasons. */
-#define VEC_SSE41_CMPEQ_8x16(sign) /* nothing */
-#define VEC_SSE41_CMPEQ_16x8(sign) /* nothing */
-#define VEC_SSE41_CMPEQ_32x4(sign) /* nothing */
-#define VEC_SSE41_CMPEQ_64x2(sign) VEC_xSSE41_CMP(cmpeq, cmpeq, sign, 64, 2, 1, 2, , ,)
-
-/* ------------------------------------------------------------------------ */
-
-#define VEC_SSE41_STRUCT_CMP_8x16(name, sign) NULL
-#define VEC_SSE41_STRUCT_CMP_16x8(name, sign) NULL
-#define VEC_SSE41_STRUCT_CMP_32x4(name, sign) NULL
-#define VEC_SSE41_STRUCT_CMP_64x2(name, sign) v##sign##int64x2_sse41_##name
-
-/* ------------------------------------------------------------------------ */
-
-// SSE 4.1 provides a real _mm_mullo_epi32
-#define VEC_SSE41_DEFINE_OPERATIONS_SIGN(sign, bits, size) \
-	union v##sign##int##bits##x##size##_impl_data { \
-		v##sign##int##bits##x##size vec; \
-		__m128i sse; \
-	}; \
-	\
-	VEC_STATIC_ASSERT(VEC_ALIGNOF(__m128i) <= VEC_ALIGNOF(v##sign##int##bits##x##size), "vec: v" #sign "int32x4 alignment needs to be expanded to fit intrinsic type size"); \
-	VEC_STATIC_ASSERT(sizeof(__m128i) <= sizeof(v##sign##int##bits##x##size), "vec: v" #sign "int32x4 needs to be expanded to fit intrinsic type size"); \
-	\
-	VEC_SSE41_MUL_##bits##x##size(sign) \
-	\
-	VEC_SSE41_MINMAX_##sign##bits##x##size(min) \
-	VEC_SSE41_MINMAX_##sign##bits##x##size(max) \
-	\
-	VEC_SSE41_CMPEQ_##bits##x##size(sign); \
-	\
-	const v##sign##int##bits##x##size##_impl v##sign##int##bits##x##size##_impl_sse41 = { \
-		.mul = VEC_SSE41_STRUCT_MUL_##bits##x##size(sign), \
-		.min = VEC_SSE41_STRUCT_MINMAX_##bits##x##size(min, sign), \
-		.max = VEC_SSE41_STRUCT_MINMAX_##bits##x##size(max, sign), \
-		.cmpeq = VEC_SSE41_STRUCT_CMP_##bits##x##size(cmpeq, sign), \
 	};
 
-#define VEC_SSE41_DEFINE_OPERATIONS(bits, size) \
-	VEC_SSE41_DEFINE_OPERATIONS_SIGN(u, bits, size) \
-	VEC_SSE41_DEFINE_OPERATIONS_SIGN( , bits, size)
-
-VEC_SSE41_DEFINE_OPERATIONS(8, 16)
-VEC_SSE41_DEFINE_OPERATIONS(16, 8)
-VEC_SSE41_DEFINE_OPERATIONS(32, 4)
-VEC_SSE41_DEFINE_OPERATIONS(64, 2)
+VEC_SSE41_DEFINE_OPERATIONS()
+VEC_SSE41_DEFINE_OPERATIONS(u)
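For readers not fluent in token pasting: the two invocations at the end instantiate the signed and unsigned 32x4 backends. A rough by-hand expansion of VEC_SSE41_DEFINE_OPERATIONS(u), abridged to the interesting parts, is sketched below. Note that _mm_mullo_epi32 serves both signs, since the low 32 bits of a product do not depend on signedness, and that every table slot other than mul is filled from the SSE2 or generic fallbacks, which is why this revision adds the vec/impl/x86/sse2.h include.

/* punning between the public vector type and the intrinsic register type */
union vuint32x4_impl_data {
	vuint32x4 vec;
	__m128i sse;
};

static vuint32x4 vuint32x4_sse41_mul(vuint32x4 vec1, vuint32x4 vec2)
{
	union vuint32x4_impl_data *vec1d = (union vuint32x4_impl_data *)&vec1;
	union vuint32x4_impl_data *vec2d = (union vuint32x4_impl_data *)&vec2;

	vec1d->sse = _mm_mullo_epi32(vec1d->sse, vec2d->sse);
	return vec1d->vec;
}

const vuint32x4_impl vuint32x4_impl_sse41 = {
	vuint32x4_generic_splat,
	vuint32x4_sse2_load_aligned,
	/* ...remaining SSE2/generic entries as listed in the macro... */
	vuint32x4_sse41_mul,   /* the only SSE 4.1-specific entry */
	/* ... */
	vuint32x4_generic_cmpgt,
};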