comparison src/impl/x86/sse2.c @ 23:e26874655738

*: huge refactor, new major release (hahaha). I keep finding things that are broken... The problem NOW was that vec would unintentionally build some functions with extended instruction sets, which is Bad: for all intents and purposes, the CPU detection was completely broken. vec is no longer header-only either. Boohoo. However, this gives vec a lot more flexibility, since we no longer want or need to care about C++ crap. The NEON and AltiVec implementations have not been updated, which means they won't compile; that's why they're commented out in the CMake build file.
author Paper <paper@tflc.us>
date Sun, 24 Nov 2024 02:52:40 -0500
parents 22:fbcd3fa6f8fc
children e49e70f7012f
/**
 * vec - a tiny SIMD vector library in C99
 *
 * Copyright (c) 2024 Paper
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 **/

#include "vec/impl/x86/sse2.h"
#include "vec/impl/generic.h"

#include <emmintrin.h>
#define VEC_SSE2_OPERATION_8x16(op, sign) \
	do { \
		/* no 8-bit instruction; operate on the even and odd bytes separately */ \
		union v##sign##int8x16_impl_data *vec1d = (union v##sign##int8x16_impl_data *)&vec1; \
		union v##sign##int8x16_impl_data *vec2d = (union v##sign##int8x16_impl_data *)&vec2; \
	\
		__m128i dst_even = _mm_##op##_epi16(vec1d->sse, vec2d->sse); \
		__m128i dst_odd = _mm_##op##_epi16(_mm_srli_epi16(vec1d->sse, 8), _mm_srli_epi16(vec2d->sse, 8)); \
	\
		/* repack, masking out the garbage high byte of each even result */ \
		vec1d->sse = _mm_or_si128( \
			_mm_slli_epi16(dst_odd, 8), \
			_mm_srli_epi16(_mm_slli_epi16(dst_even, 8), 8) \
		); \
		return vec1d->vec; \
	} while (0)
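
// What the macro above does, in scalar terms: SSE2 has no 8-bit multiply,
// so each 16-bit lane is treated as an even byte (low) and an odd byte
// (high). The low 8 bits of a 16-bit product depend only on the low 8 bits
// of each operand, so the garbage high bytes are harmless and get masked
// off during the repack. A rough sketch of the same idea, compiled out and
// illustrative only (these helper names are not part of vec):
#if 0
#include <stdint.h>

static void mul8x16_scalar_model(const uint8_t a[16], const uint8_t b[16], uint8_t out[16])
{
	for (int i = 0; i < 16; i += 2) {
		uint16_t la = (uint16_t)(a[i] | (a[i + 1] << 8)); /* one 16-bit lane */
		uint16_t lb = (uint16_t)(b[i] | (b[i + 1] << 8));

		uint16_t even = (uint16_t)(la * lb);               /* _mm_mullo_epi16 */
		uint16_t odd  = (uint16_t)((la >> 8) * (lb >> 8)); /* mullo of lanes >> 8 */

		out[i]     = (uint8_t)(even & 0xFF); /* (even << 8) >> 8 */
		out[i + 1] = (uint8_t)(odd & 0xFF);  /* (odd << 8), taken as the high byte */
	}
}
#endif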

// shared between SSE2 variations
#define VEC_SSE2_MUL_8x16(sign) \
	VEC_SSE2_OPERATION_8x16(mullo, sign)

#define VEC_SSE2_MUL_16x8(sign) \
	do { \
		/* we have a real instruction for this */ \
		union v##sign##int16x8_impl_data *vec1d = (union v##sign##int16x8_impl_data *)&vec1; \
		union v##sign##int16x8_impl_data *vec2d = (union v##sign##int16x8_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_mullo_epi16(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} while (0)

#define VEC_SSE2_MUL_32x4(sign) \
	do { \
		/* this was stolen from... somewhere :) */ \
		union v##sign##int32x4_impl_data *vec1d = (union v##sign##int32x4_impl_data *)&vec1; \
		union v##sign##int32x4_impl_data *vec2d = (union v##sign##int32x4_impl_data *)&vec2; \
	\
		__m128i a13 = _mm_shuffle_epi32(vec1d->sse, 0xF5); /* (-,a3,-,a1) */ \
		__m128i b13 = _mm_shuffle_epi32(vec2d->sse, 0xF5); /* (-,b3,-,b1) */ \
		__m128i prod02 = _mm_mul_epu32(vec1d->sse, vec2d->sse); /* (-,a2*b2,-,a0*b0) */ \
		__m128i prod13 = _mm_mul_epu32(a13, b13); /* (-,a3*b3,-,a1*b1) */ \
		__m128i prod01 = _mm_unpacklo_epi32(prod02, prod13); /* (-,-,a1*b1,a0*b0) */ \
		__m128i prod23 = _mm_unpackhi_epi32(prod02, prod13); /* (-,-,a3*b3,a2*b2) */ \
	\
		vec1d->sse = _mm_unpacklo_epi64(prod01, prod23); /* (ab3,ab2,ab1,ab0) */ \
		return vec1d->vec; \
	} while (0)
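
// The trick above: _mm_mul_epu32 only multiplies 32-bit lanes 0 and 2,
// producing two 64-bit products, so the odd lanes are shuffled down into
// even position first (0xF5 == _MM_SHUFFLE(3, 3, 1, 1)) and the unpacks
// stitch the four low 32-bit halves back into lane order. The same code
// serves both signs because the low 32 bits of a product are identical for
// signed and unsigned operands in two's complement. A scalar model of the
// lane bookkeeping, compiled out and illustrative only:
#if 0
#include <stdint.h>

static void mul32x4_scalar_model(const uint32_t a[4], const uint32_t b[4], uint32_t out[4])
{
	uint64_t prod02[2] = { (uint64_t)a[0] * b[0], (uint64_t)a[2] * b[2] }; /* _mm_mul_epu32 */
	uint64_t prod13[2] = { (uint64_t)a[1] * b[1], (uint64_t)a[3] * b[3] }; /* ...after the shuffles */

	/* unpacklo/unpackhi_epi32 gather the low 32 bits of each product;
	 * unpacklo_epi64 then restores original lane order */
	out[0] = (uint32_t)prod02[0];
	out[1] = (uint32_t)prod13[0];
	out[2] = (uint32_t)prod02[1];
	out[3] = (uint32_t)prod13[1];
}
#endif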

#define VEC_SSE2_MUL_64x2(sign) \
	do { \
		union v##sign##int64x2_impl_data *vec1d = (union v##sign##int64x2_impl_data *)&vec1; \
		union v##sign##int64x2_impl_data *vec2d = (union v##sign##int64x2_impl_data *)&vec2; \
	\
		__m128i ac = _mm_mul_epu32(vec1d->sse, vec2d->sse); /* ac = (vec1 & UINT32_MAX) * (vec2 & UINT32_MAX); */ \
		__m128i b = _mm_srli_epi64(vec1d->sse, 32); /* b = vec1 >> 32; */ \
		__m128i bc = _mm_mul_epu32(b, vec2d->sse); /* bc = b * (vec2 & UINT32_MAX); */ \
		__m128i d = _mm_srli_epi64(vec2d->sse, 32); /* d = vec2 >> 32; */ \
		__m128i ad = _mm_mul_epu32(vec1d->sse, d); /* ad = (vec1 & UINT32_MAX) * d; */ \
		__m128i hi = _mm_add_epi64(bc, ad); /* hi = bc + ad; */ \
		hi = _mm_slli_epi64(hi, 32); /* hi <<= 32; */ \
	\
		vec1d->sse = _mm_add_epi64(hi, ac); /* return hi + ac; */ \
		return vec1d->vec; \
	} while (0)
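
// The inline comments above spell out the schoolbook identity being
// computed: writing vec1 = (b << 32) | a and vec2 = (d << 32) | c per
// 64-bit lane, the low 64 bits of the product are ((b*c + a*d) << 32) + a*c,
// since the b*d term is shifted out of range entirely. A scalar check of
// the identity, compiled out and illustrative only:
#if 0
#include <stdint.h>

static uint64_t mul64_low_scalar(uint64_t x, uint64_t y)
{
	uint64_t a = x & 0xFFFFFFFFu, b = x >> 32; /* low/high halves of x */
	uint64_t c = y & 0xFFFFFFFFu, d = y >> 32; /* low/high halves of y */

	uint64_t ac = a * c;         /* _mm_mul_epu32(vec1, vec2) */
	uint64_t hi = b * c + a * d; /* bc + ad */

	return (hi << 32) + ac;      /* == x * y (mod 2^64) */
}
#endif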

#define VEC_SSE2_CMPEQ_8x16(sign) \
	do { \
		union v##sign##int8x16_impl_data *vec1d = (union v##sign##int8x16_impl_data *)&vec1; \
		union v##sign##int8x16_impl_data *vec2d = (union v##sign##int8x16_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_cmpeq_epi8(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} while (0)

#define VEC_SSE2_CMPEQ_16x8(sign) \
	do { \
		union v##sign##int16x8_impl_data *vec1d = (union v##sign##int16x8_impl_data *)&vec1; \
		union v##sign##int16x8_impl_data *vec2d = (union v##sign##int16x8_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_cmpeq_epi16(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} while (0)

#define VEC_SSE2_CMPEQ_32x4(sign) \
	do { \
		union v##sign##int32x4_impl_data *vec1d = (union v##sign##int32x4_impl_data *)&vec1; \
		union v##sign##int32x4_impl_data *vec2d = (union v##sign##int32x4_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_cmpeq_epi32(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} while (0)

// SSE2 doesn't have an intrinsic for 64x2 equality comparison,
// so how can we take a 32x4 comparison result and turn it into
// a 64x2 comparison result?
//
// well, Intel conveniently provided an operation where we can
// shuffle around 32-bit integers (_mm_shuffle_epi32).
//
// this means all we have to do is simply do the 32-bit operation,
// shuffle the parts, and then return a bitwise AND of the result.

#define VEC_SSE2_CMPEQ_64x2(sign) \
	do { \
		union v##sign##int64x2_impl_data *vec1d = (union v##sign##int64x2_impl_data *)&vec1; \
		union v##sign##int64x2_impl_data *vec2d = (union v##sign##int64x2_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_cmpeq_epi32(vec1d->sse, vec2d->sse); \
		vec2d->sse = _mm_shuffle_epi32(vec1d->sse, _MM_SHUFFLE(3, 3, 1, 1)); /* odd half of each pair */ \
		vec1d->sse = _mm_shuffle_epi32(vec1d->sse, _MM_SHUFFLE(2, 2, 0, 0)); /* even half of each pair */ \
		vec1d->sse = _mm_and_si128(vec1d->sse, vec2d->sse); \
	\
		return vec1d->vec; \
	} while (0)
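
// Lane by lane, that works out as follows: after _mm_cmpeq_epi32 the mask
// is (c3,c2,c1,c0), where each element is all-ones or all-zeros. One
// shuffle broadcasts the odd member of each 32-bit pair, the other the
// even member, and the AND leaves a 64-bit lane all-ones exactly when both
// of its halves matched. A scalar model, compiled out and illustrative only:
#if 0
#include <stdint.h>

static void cmpeq64x2_scalar_model(const uint32_t c[4], uint32_t out[4])
{
	/* c[i] is 0xFFFFFFFF where the i-th 32-bit lanes were equal, else 0 */
	const uint32_t odd[4]  = { c[1], c[1], c[3], c[3] }; /* _MM_SHUFFLE(3, 3, 1, 1) */
	const uint32_t even[4] = { c[0], c[0], c[2], c[2] }; /* _MM_SHUFFLE(2, 2, 0, 0) */

	for (int i = 0; i < 4; i++)
		out[i] = odd[i] & even[i]; /* all-ones iff both halves of the pair matched */
}
#endif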

#define VEC_SSE2_DEFINE_OPERATIONS_SIGN(sign, bits, size) \
	union v##sign##int##bits##x##size##_impl_data { \
		v##sign##int##bits##x##size vec; \
		__m128i sse; \
	}; \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_load_aligned(const vec_##sign##int##bits in[size]) \
	{ \
		union v##sign##int##bits##x##size##_impl_data vec; \
		vec.sse = _mm_load_si128((const __m128i *)in); \
		return vec.vec; \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_load(const vec_##sign##int##bits in[size]) \
	{ \
		union v##sign##int##bits##x##size##_impl_data vec; \
		vec.sse = _mm_loadu_si128((const __m128i *)in); \
		return vec.vec; \
	} \
	\
	void v##sign##int##bits##x##size##_sse2_store_aligned(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
	{ \
		_mm_store_si128((__m128i *)out, ((union v##sign##int##bits##x##size##_impl_data *)&vec)->sse); \
	} \
	\
	void v##sign##int##bits##x##size##_sse2_store(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
	{ \
		_mm_storeu_si128((__m128i *)out, ((union v##sign##int##bits##x##size##_impl_data *)&vec)->sse); \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_add(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_add_epi##bits(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_sub(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_sub_epi##bits(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_mul(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		VEC_SSE2_MUL_##bits##x##size(sign); \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_and(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_and_si128(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_or(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_or_si128(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_xor(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_xor_si128(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_cmpeq(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		VEC_SSE2_CMPEQ_##bits##x##size(sign); \
	} \
	\
	const v##sign##int##bits##x##size##_impl v##sign##int##bits##x##size##_impl_sse2 = { \
		v##sign##int##bits##x##size##_generic_splat, \
		v##sign##int##bits##x##size##_sse2_load_aligned, \
		v##sign##int##bits##x##size##_sse2_load, \
		v##sign##int##bits##x##size##_sse2_store_aligned, \
		v##sign##int##bits##x##size##_sse2_store, \
		v##sign##int##bits##x##size##_sse2_add, \
		v##sign##int##bits##x##size##_sse2_sub, \
		v##sign##int##bits##x##size##_sse2_mul, \
		v##sign##int##bits##x##size##_generic_div, \
		v##sign##int##bits##x##size##_generic_avg, \
		v##sign##int##bits##x##size##_sse2_and, \
		v##sign##int##bits##x##size##_sse2_or, \
		v##sign##int##bits##x##size##_sse2_xor, \
		v##sign##int##bits##x##size##_generic_not, \
		v##sign##int##bits##x##size##_generic_lshift, \
		v##sign##int##bits##x##size##_generic_rshift, \
		v##sign##int##bits##x##size##_generic_lrshift, \
		v##sign##int##bits##x##size##_generic_cmplt, \
		v##sign##int##bits##x##size##_generic_cmple, \
		v##sign##int##bits##x##size##_sse2_cmpeq, \
		v##sign##int##bits##x##size##_generic_cmpge, \
		v##sign##int##bits##x##size##_generic_cmpgt, \
	};

#define VEC_SSE2_DEFINE_OPERATIONS(bits, size) \
	VEC_SSE2_DEFINE_OPERATIONS_SIGN(u, bits, size) \
	VEC_SSE2_DEFINE_OPERATIONS_SIGN( , bits, size)

// SSE is *only* 128-bit
VEC_SSE2_DEFINE_OPERATIONS(8, 16)
VEC_SSE2_DEFINE_OPERATIONS(16, 8)
VEC_SSE2_DEFINE_OPERATIONS(32, 4)
VEC_SSE2_DEFINE_OPERATIONS(64, 2)
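
// For reference, each expansion above generates plain functions like
// vuint32x4_sse2_load/_mul/_store plus a vuint32x4_impl_sse2 dispatch
// table. A minimal sketch of calling one expansion directly, compiled out
// and illustrative only (real callers go through vec's public API, which
// selects an implementation after CPU detection):
#if 0
static void example_u32x4(void)
{
	vec_uint32 a[4] = { 1, 2, 3, 4 };
	vec_uint32 b[4] = { 5, 6, 7, 8 };
	vec_uint32 out[4];

	vuint32x4 v1 = vuint32x4_sse2_load(a); /* unaligned load */
	vuint32x4 v2 = vuint32x4_sse2_load(b);

	vuint32x4_sse2_store(vuint32x4_sse2_mul(v1, v2), out);
	/* out == { 5, 12, 21, 32 } */
}
#endif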