Mercurial > vec
comparison src/impl/x86/avx2.c @ 23:e26874655738
*: huge refactor, new major release (hahaha)
I keep finding things that are broken...
The problem NOW was that vec would unintentionally build some
functions with extended instruction sets, which is Bad and would
mean that for all intents and purposes the CPU detection was
completely broken.
Now vec is no longer header only either. Boohoo. However this gives
a lot more flexibility to vec since we no longer want or need to
care about C++ crap.
The NEON and Altivec implementations have not been updated, which
means they won't compile; hence they are commented out in the
CMake build file.
author | Paper <paper@tflc.us> |
---|---|
date | Sun, 24 Nov 2024 02:52:40 -0500 |
parents | |
children | e49e70f7012f |
comparison
equal
deleted
inserted
replaced
22:fbcd3fa6f8fc | 23:e26874655738 |
---|---|
1 /** | |
2 * vec - a tiny SIMD vector library in C99 | |
3 * | |
4 * Copyright (c) 2024 Paper | |
5 * | |
6 * Permission is hereby granted, free of charge, to any person obtaining a copy | |
7 * of this software and associated documentation files (the "Software"), to deal | |
8 * in the Software without restriction, including without limitation the rights | |
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
10 * copies of the Software, and to permit persons to whom the Software is | |
11 * furnished to do so, subject to the following conditions: | |
12 * | |
13 * The above copyright notice and this permission notice shall be included in all | |
14 * copies or substantial portions of the Software. | |
15 * | |
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
22 * SOFTWARE. | |
23 **/ | |
24 | |
25 #include "vec/impl/x86/avx2.h" | |
26 #include "vec/impl/generic.h" | |
27 | |
28 #include <immintrin.h> | |
29 | |
/* Emulates an 8-bit element-wise operation using 16-bit instructions.
 * This does NOT handle sign bits properly -- use with caution.
 * Expects `vec1` and `vec2` in scope and returns from the enclosing
 * function. */
#define VEC_AVX2_OPERATION_8x32_16x16(op, sign) \
	do { \
		union v##sign##int8x32_impl_data *v1 = (union v##sign##int8x32_impl_data *)&vec1; \
		union v##sign##int8x32_impl_data *v2 = (union v##sign##int8x32_impl_data *)&vec2; \
	\
		/* run the 16-bit op on the even bytes in place, and on the odd \
		 * bytes shifted down into the low half of each 16-bit lane */ \
		__m256i even = _mm256_##op##_epi16(v1->avx2, v2->avx2); \
		__m256i odd  = _mm256_##op##_epi16(_mm256_srli_epi16(v1->avx2, 8), _mm256_srli_epi16(v2->avx2, 8)); \
	\
		/* interleave back: odd results into the high byte of each lane, \
		 * even results masked down to the low byte */ \
		v1->avx2 = _mm256_or_si256( \
			_mm256_slli_epi16(odd, 8), \
			_mm256_srli_epi16(_mm256_slli_epi16(even, 8), 8) \
		); \
		return v1->vec; \
	} while (0)
47 | |
/* Emulates a 16-bit element-wise operation using 32-bit instructions.
 * This does NOT handle sign bits properly -- use with caution.
 * Expects `vec1` and `vec2` in scope and returns from the enclosing
 * function. */
#define VEC_AVX2_OPERATION_16x16(op, sign) \
	do { \
		union v##sign##int16x16_impl_data *vec1d = (union v##sign##int16x16_impl_data *)&vec1; \
		union v##sign##int16x16_impl_data *vec2d = (union v##sign##int16x16_impl_data *)&vec2; \
	\
		/* run the 32-bit op on the even 16-bit elements in place, and on \
		 * the odd elements shifted into the low half of each 32-bit lane */ \
		__m256i dst_even = _mm256_##op##_epi32(vec1d->avx2, vec2d->avx2); \
		__m256i dst_odd = _mm256_##op##_epi32(_mm256_srli_epi32(vec1d->avx2, 16), _mm256_srli_epi32(vec2d->avx2, 16)); \
	\
		/* repack.  BUGFIX: mask the even results with 32-bit shifts; the \
		 * previous _mm256_slli_epi16 by 16 shifted every 16-bit lane out \
		 * entirely, zeroing all even results */ \
		vec1d->avx2 = _mm256_or_si256( \
			_mm256_slli_epi32(dst_odd, 16), \
			_mm256_srli_epi32(_mm256_slli_epi32(dst_even, 16), 16) \
		); \
		return vec1d->vec; \
	} while (0)
64 | |
65 // multiplication | |
66 | |
/* 8-bit multiply: AVX2 has no native 8-bit multiply, so emulate with
 * 16-bit multiplies.  The helper's sign-bit caveat is harmless here:
 * the low 8 bits of a product depend only on the low 8 bits of the
 * operands. */
#define VEC_AVX2_MUL_8x32(sign) \
	VEC_AVX2_OPERATION_8x32_16x16(mullo, sign)
69 | |
/* 16-bit multiply: maps directly onto the native AVX2 instruction. */
#define VEC_AVX2_MUL_16x16(sign) \
	do { \
		union v##sign##int16x16_impl_data *lhs = (union v##sign##int16x16_impl_data *)&vec1; \
		union v##sign##int16x16_impl_data *rhs = (union v##sign##int16x16_impl_data *)&vec2; \
	\
		lhs->avx2 = _mm256_mullo_epi16(lhs->avx2, rhs->avx2); \
		return lhs->vec; \
	} while (0)
78 | |
/* 32-bit multiply: maps directly onto the native AVX2 instruction. */
#define VEC_AVX2_MUL_32x8(sign) \
	do { \
		union v##sign##int32x8_impl_data *lhs = (union v##sign##int32x8_impl_data *)&vec1; \
		union v##sign##int32x8_impl_data *rhs = (union v##sign##int32x8_impl_data *)&vec2; \
	\
		lhs->avx2 = _mm256_mullo_epi32(lhs->avx2, rhs->avx2); \
		return lhs->vec; \
	} while (0)
87 | |
/* 64-bit multiply built from 32-bit partial products, since AVX2 has
 * no 64-bit multiply: result = lo*lo + ((hi1*lo2 + lo1*hi2) << 32).
 * The hi*hi term falls entirely outside the low 64 bits and is
 * dropped; the same code is correct for signed and unsigned (modular
 * arithmetic). */
#define VEC_AVX2_MUL_64x4(sign) \
	do { \
		union v##sign##int64x4_impl_data *v1 = (union v##sign##int64x4_impl_data *)&vec1; \
		union v##sign##int64x4_impl_data *v2 = (union v##sign##int64x4_impl_data *)&vec2; \
	\
		__m256i lo_lo = _mm256_mul_epu32(v1->avx2, v2->avx2); /* lo(a)*lo(b) */ \
		__m256i a_hi = _mm256_srli_epi64(v1->avx2, 32); \
		__m256i cross1 = _mm256_mul_epu32(a_hi, v2->avx2); /* hi(a)*lo(b) */ \
		__m256i b_hi = _mm256_srli_epi64(v2->avx2, 32); \
		__m256i cross2 = _mm256_mul_epu32(v1->avx2, b_hi); /* lo(a)*hi(b) */ \
		__m256i high = _mm256_slli_epi64(_mm256_add_epi64(cross1, cross2), 32); \
	\
		v1->avx2 = _mm256_add_epi64(high, lo_lo); \
		return v1->vec; \
	} while (0)
104 | |
105 // operations | |
106 | |
/* Expands to the complete AVX2 implementation for one combination of
 * signedness (`sign`: empty or `u`), element width (`bits`) and lane
 * count (`size`): a type-punning union, static load/store/arithmetic/
 * bitwise wrappers, and the impl vtable exposing them.  Operations
 * AVX2 cannot accelerate are delegated to the *_generic_* fallbacks. */
#define VEC_AVX2_DEFINE_OPERATIONS_SIGN(sign, bits, size) \
	/* reinterprets the public vector type as a raw __m256i register */ \
	union v##sign##int##bits##x##size##_impl_data { \
		v##sign##int##bits##x##size vec; \
		__m256i avx2; \
	}; \
	\
	/* load from 32-byte-aligned memory */ \
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_load_aligned(const vec_##sign##int##bits in[size]) \
	{ \
		union v##sign##int##bits##x##size##_impl_data vec; \
		vec.avx2 = _mm256_load_si256((const __m256i *)in); \
		return vec.vec; \
	} \
	\
	/* load from possibly-unaligned memory */ \
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_load(const vec_##sign##int##bits in[size]) \
	{ \
		union v##sign##int##bits##x##size##_impl_data vec; \
		vec.avx2 = _mm256_loadu_si256((const __m256i *)in); \
		return vec.vec; \
	} \
	\
	/* store to 32-byte-aligned memory */ \
	static void v##sign##int##bits##x##size##_avx2_store_aligned(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
	{ \
		_mm256_store_si256((__m256i *)out, ((union v##sign##int##bits##x##size##_impl_data*)&vec)->avx2); \
	} \
	\
	/* store to possibly-unaligned memory */ \
	static void v##sign##int##bits##x##size##_avx2_store(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
	{ \
		_mm256_storeu_si256((__m256i *)out, ((union v##sign##int##bits##x##size##_impl_data*)&vec)->avx2); \
	} \
	\
	/* element-wise addition (modular, same instruction for both signs) */ \
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_add(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->avx2 = _mm256_add_epi##bits(vec1d->avx2, vec2d->avx2); \
		return vec1d->vec; \
	} \
	\
	/* element-wise subtraction */ \
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_sub(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->avx2 = _mm256_sub_epi##bits(vec1d->avx2, vec2d->avx2); \
		return vec1d->vec; \
	} \
	\
	/* element-wise multiplication; dispatches on width to the matching \
	 * VEC_AVX2_MUL_* macro, which performs the `return` itself */ \
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_mul(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		VEC_AVX2_MUL_##bits##x##size(sign); \
	} \
	\
	/* bitwise AND */ \
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_and(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->avx2 = _mm256_and_si256(vec1d->avx2, vec2d->avx2); \
		return vec1d->vec; \
	} \
	\
	/* bitwise OR */ \
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_or(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->avx2 = _mm256_or_si256(vec1d->avx2, vec2d->avx2); \
		return vec1d->vec; \
	} \
	\
	/* bitwise XOR */ \
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_xor(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->avx2 = _mm256_xor_si256(vec1d->avx2, vec2d->avx2); \
		return vec1d->vec; \
	} \
	\
	/* NOTE(review): initializer order must match the member order of the \
	 * v*_impl struct declared in vec's headers (not visible here) -- \
	 * verify against that declaration when either side changes */ \
	const v##sign##int##bits##x##size##_impl v##sign##int##bits##x##size##_impl_avx2 = { \
		v##sign##int##bits##x##size##_generic_splat, \
		v##sign##int##bits##x##size##_avx2_load_aligned, \
		v##sign##int##bits##x##size##_avx2_load, \
		v##sign##int##bits##x##size##_avx2_store_aligned, \
		v##sign##int##bits##x##size##_avx2_store, \
		v##sign##int##bits##x##size##_avx2_add, \
		v##sign##int##bits##x##size##_avx2_sub, \
		v##sign##int##bits##x##size##_avx2_mul, \
		v##sign##int##bits##x##size##_generic_div, \
		v##sign##int##bits##x##size##_generic_avg, \
		v##sign##int##bits##x##size##_avx2_and, \
		v##sign##int##bits##x##size##_avx2_or, \
		v##sign##int##bits##x##size##_avx2_xor, \
		v##sign##int##bits##x##size##_generic_not, \
		v##sign##int##bits##x##size##_generic_lshift, \
		v##sign##int##bits##x##size##_generic_rshift, \
		v##sign##int##bits##x##size##_generic_lrshift, \
		v##sign##int##bits##x##size##_generic_cmplt, \
		v##sign##int##bits##x##size##_generic_cmple, \
		v##sign##int##bits##x##size##_generic_cmpeq, \
		v##sign##int##bits##x##size##_generic_cmpge, \
		v##sign##int##bits##x##size##_generic_cmpgt, \
	};
211 | |
/* Instantiates both the signed (empty sign token) and unsigned (`u`)
 * implementations for one element width / lane count pair. */
#define VEC_AVX2_DEFINE_OPERATIONS(bits, size) \
	VEC_AVX2_DEFINE_OPERATIONS_SIGN( , bits, size) \
	VEC_AVX2_DEFINE_OPERATIONS_SIGN(u, bits, size)
215 | |
/* Instantiate every 256-bit integer layout handled here:
 * 8x32, 16x16, 32x8 and 64x4, each in signed and unsigned variants. */
VEC_AVX2_DEFINE_OPERATIONS(8, 32)
VEC_AVX2_DEFINE_OPERATIONS(16, 16)
VEC_AVX2_DEFINE_OPERATIONS(32, 8)
VEC_AVX2_DEFINE_OPERATIONS(64, 4)