Mercurial > vec
comparison src/impl/x86/avx512f.c @ 28:c6c99ab1088a
*: add min/max functions and a big big refactor (again)
agh, this time I added a few more implementations (and generally
made the code just a little faster...)
| author | Paper <paper@tflc.us> |
|---|---|
| date | Thu, 24 Apr 2025 00:54:02 -0400 |
| parents | e49e70f7012f |
| children | bf6ad516f1e6 |
comparison view — line markers: equal · deleted · inserted · replaced
| 27:d00b95f95dd1 | 28:c6c99ab1088a |
|---|---|
| 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 22 * SOFTWARE. | 22 * SOFTWARE. |
| 23 **/ | 23 **/ |
| 24 | 24 |
| 25 #include "vec/impl/x86/avx512f.h" | 25 #include "vec/impl/x86/avx512f.h" |
| 26 #include "vec/impl/generic.h" | |
| 27 | 26 |
| 28 #include <immintrin.h> | 27 #include <immintrin.h> |
| 29 | 28 |
| 30 // this is a stupid amount of work just to do these operations, is it really worth it ? | 29 /* ------------------------------------------------------------------------ */ |
| 31 // also same note in avx2.c applies here, these do not handle sign bits properly, which | 30 |
| 32 // isn't that big of a deal for regular arithmetic operations, but matters quite a bit | 31 #define VEC_AVX512F_MINMAX_TEMPLATE(SIGN, BITS, SIZE, INTLSIGN, OP) \ |
| 33 // when doing things like arithmetic shifts. | 32 VEC_FUNC_IMPL v##SIGN##int##BITS##x##SIZE v##SIGN##int##BITS##x##SIZE##_avx512f_##OP(v##SIGN##int##BITS##x##SIZE vec1, v##SIGN##int##BITS##x##SIZE vec2) \ |
| 34 #define VEC_AVX512F_OPERATION_8x64(op, sign) \ | 33 { \ |
| 35 do { \ | 34 union v##SIGN##int##BITS##x##SIZE##_impl_data *vec1d = (union v##SIGN##int##BITS##x##SIZE##_impl_data *)&vec1; \ |
| 36 union v##sign##int8x64_impl_data *vec1d = (union v##sign##int8x64_impl_data *)&vec1; \ | 35 union v##SIGN##int##BITS##x##SIZE##_impl_data *vec2d = (union v##SIGN##int##BITS##x##SIZE##_impl_data *)&vec2; \ |
| 37 union v##sign##int8x64_impl_data *vec2d = (union v##sign##int8x64_impl_data *)&vec2; \ | |
| 38 \ | 36 \ |
| 39 /* unpack and operate */ \ | 37 vec1d->avx512f = _mm512_##OP##_ep##INTLSIGN##BITS(vec1d->avx512f, vec2d->avx512f); \ |
| 40 __m512i dst_1 = _mm512_##op##_epi32(_mm512_srli_epi32(_mm512_slli_epi32(vec1d->avx512f, 24), 24), _mm512_srli_epi32(_mm512_slli_epi32(vec2d->avx512f, 24), 24)); \ | |
| 41 __m512i dst_2 = _mm512_##op##_epi32(_mm512_srli_epi32(_mm512_slli_epi32(vec1d->avx512f, 16), 24), _mm512_srli_epi32(_mm512_slli_epi32(vec2d->avx512f, 16), 24)); \ | |
| 42 __m512i dst_3 = _mm512_##op##_epi32(_mm512_srli_epi32(_mm512_slli_epi32(vec1d->avx512f, 8), 24), _mm512_srli_epi32(_mm512_slli_epi32(vec2d->avx512f, 8), 24)); \ | |
| 43 __m512i dst_4 = _mm512_##op##_epi32(_mm512_srli_epi32(vec1d->avx512f, 24), _mm512_srli_epi32(vec2d->avx512f, 24)); \ | |
| 44 \ | |
| 45 /* repack */ \ | |
| 46 vec1d->avx512f = _mm512_or_si512( \ | |
| 47 _mm512_or_si512( \ | |
| 48 _mm512_srli_epi32(_mm512_slli_epi32(dst_1, 24), 24), \ | |
| 49 _mm512_srli_epi32(_mm512_slli_epi32(dst_2, 24), 16) \ | |
| 50 ), \ | |
| 51 _mm512_or_si512( \ | |
| 52 _mm512_srli_epi32(_mm512_slli_epi32(dst_3, 24), 8), \ | |
| 53 _mm512_slli_epi32(dst_4, 24) \ | |
| 54 ) \ | |
| 55 ); \ | |
| 56 \ | 38 \ |
| 57 return vec1d->vec; \ | 39 return vec1d->vec; \ |
| 58 } while (0) | 40 } |
| 59 | 41 |
| 60 #define VEC_AVX512F_OPERATION_16x32(op, sign) \ | 42 #define VEC_AVX512F_MINMAX_32x16(OP) VEC_AVX512F_MINMAX_TEMPLATE( , 32, 16, i, OP) |
| 61 do { \ | 43 #define VEC_AVX512F_MINMAX_u32x16(OP) VEC_AVX512F_MINMAX_TEMPLATE(u, 32, 16, u, OP) |
| 62 union v##sign##int16x32_impl_data *vec1d = (union v##sign##int16x32_impl_data *)&vec1; \ | 44 #define VEC_AVX512F_MINMAX_64x8(OP) VEC_AVX512F_MINMAX_TEMPLATE( , 64, 8, i, OP) |
| 63 union v##sign##int16x32_impl_data *vec2d = (union v##sign##int16x32_impl_data *)&vec2; \ | 45 #define VEC_AVX512F_MINMAX_u64x8(OP) VEC_AVX512F_MINMAX_TEMPLATE(u, 64, 8, u, OP) |
| 46 | |
| 47 #define VEC_AVX512F_STRUCT_MINMAX_32x16(OP, SIGN) v##SIGN##int32x16_avx512f_##OP | |
| 48 #define VEC_AVX512F_STRUCT_MINMAX_64x8(OP, SIGN) v##SIGN##int64x8_avx512f_##OP | |
| 49 | |
| 50 /* ------------------------------------------------------------------------ */ | |
| 51 | |
| 52 #define VEC_AVX512F_OPERATION_EX(name, op, sign, bits, size, secondsign) \ | |
| 53 VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_##name(v##sign##int##bits##x##size vec1, v##secondsign##int##bits##x##size vec2) \ | |
| 54 { \ | |
| 55 union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \ | |
| 56 union v##secondsign##int##bits##x##size##_impl_data *vec2d = (union v##secondsign##int##bits##x##size##_impl_data *)&vec2; \ | |
| 64 \ | 57 \ |
| 65 /* unpack and operate; it would be nice if we had an _m512_andi_epi32... */ \ | 58 vec1d->avx512f = _mm512_##op##_epi##bits(vec1d->avx512f, vec2d->avx512f); \ |
| 66 __m512i dst_1 = _mm512_##op##_epi32(_mm512_srli_epi32(_mm512_slli_epi32(vec1d->avx512f, 16), 16), _mm512_srli_epi32(_mm512_slli_epi32(vec2d->avx512f, 16), 16)); \ | |
| 67 __m512i dst_2 = _mm512_##op##_epi32(_mm512_srli_epi32(vec1d->avx512f, 16), _mm512_srli_epi32(vec2d->avx512f, 16)); \ | |
| 68 \ | 59 \ |
| 69 /* repack */ \ | |
| 70 vec1d->avx512f = _mm512_or_si512( \ | |
| 71 _mm512_srli_epi32(_mm512_slli_epi32(dst_1, 16), 16), \ | |
| 72 _mm512_slli_epi32(dst_2, 16) \ | |
| 73 ); \ | |
| 74 return vec1d->vec; \ | 60 return vec1d->vec; \ |
| 75 } while (0) | 61 } |
| 76 | 62 |
| 77 #define VEC_AVX512F_ADD_8x64(sign) \ | 63 #define VEC_AVX512F_OPERATION(name, op, sign, bits, size) \ |
| 78 VEC_AVX512F_OPERATION_8x64(add, sign) | 64 VEC_AVX512F_OPERATION_EX(name, op, sign, bits, size, sign) |
| 79 | 65 |
| 80 #define VEC_AVX512F_ADD_16x32(sign) \ | 66 #define VEC_AVX512F_OPERATION_SHIFT(name, op, sign, bits, size) \ |
| 81 VEC_AVX512F_OPERATION_16x32(add, sign) | 67 VEC_AVX512F_OPERATION_EX(name, op, sign, bits, size, u) |
| 82 | 68 |
| 83 #define VEC_AVX512F_ADD_32x16(sign) \ | 69 #define VEC_AVX512F_ADD_32x16(sign) VEC_AVX512F_OPERATION(add, add, sign, 32, 16) |
| 84 do { \ | 70 #define VEC_AVX512F_ADD_64x8(sign) VEC_AVX512F_OPERATION(add, add, sign, 64, 8) |
| 85 union v##sign##int32x16_impl_data *vec1d = (union v##sign##int32x16_impl_data *)&vec1; \ | |
| 86 union v##sign##int32x16_impl_data *vec2d = (union v##sign##int32x16_impl_data *)&vec2; \ | |
| 87 \ | |
| 88 vec1d->avx512f = _mm512_add_epi32(vec1d->avx512f, vec2d->avx512f); \ | |
| 89 return vec1d->vec; \ | |
| 90 } while (0) | |
| 91 | 71 |
| 92 #define VEC_AVX512F_ADD_64x8(sign) \ | 72 #define VEC_AVX512F_SUB_32x16(sign) VEC_AVX512F_OPERATION(sub, sub, sign, 32, 16) |
| 93 do { \ | 73 #define VEC_AVX512F_SUB_64x8(sign) VEC_AVX512F_OPERATION(sub, sub, sign, 64, 8) |
| 94 union v##sign##int64x8_impl_data *vec1d = (union v##sign##int64x8_impl_data *)&vec1; \ | |
| 95 union v##sign##int64x8_impl_data *vec2d = (union v##sign##int64x8_impl_data *)&vec2; \ | |
| 96 \ | |
| 97 vec1d->avx512f = _mm512_add_epi64(vec1d->avx512f, vec2d->avx512f); \ | |
| 98 return vec1d->vec; \ | |
| 99 } while (0) | |
| 100 | 74 |
| 101 #define VEC_AVX512F_SUB_8x64(sign) \ | 75 #define VEC_AVX512F_MUL_32x16(sign) VEC_AVX512F_OPERATION(mul, mullo, sign, 32, 16) |
| 102 VEC_AVX512F_OPERATION_8x64(sub, sign) | 76 #define VEC_AVX512F_MUL_64x8(sign) VEC_AVX512F_OPERATION(mul, mullox, sign, 64, 8) |
| 103 | 77 |
| 104 #define VEC_AVX512F_SUB_16x32(sign) \ | 78 #define VEC_AVX512F_LSHIFT_32x16(sign) VEC_AVX512F_OPERATION_SHIFT(lshift, sllv, sign, 32, 16) |
| 105 VEC_AVX512F_OPERATION_16x32(sub, sign) | 79 #define VEC_AVX512F_LSHIFT_64x8(sign) VEC_AVX512F_OPERATION_SHIFT(lshift, sllv, sign, 64, 8) |
| 106 | 80 |
| 107 #define VEC_AVX512F_SUB_32x16(sign) \ | 81 #define VEC_AVX512F_XRSHIFT(name, bits, size, sign, aORl) \ |
| 108 do { \ | 82 VEC_AVX512F_OPERATION_SHIFT(name, sr##aORl##v, sign, bits, size) |
| 109 union v##sign##int32x16_impl_data *vec1d = (union v##sign##int32x16_impl_data *)&vec1; \ | |
| 110 union v##sign##int32x16_impl_data *vec2d = (union v##sign##int32x16_impl_data *)&vec2; \ | |
| 111 \ | |
| 112 vec1d->avx512f = _mm512_sub_epi32(vec1d->avx512f, vec2d->avx512f); \ | |
| 113 return vec1d->vec; \ | |
| 114 } while (0) | |
| 115 | 83 |
| 116 #define VEC_AVX512F_SUB_64x8(sign) \ | 84 /* always the same, regardless of signedness */ |
| 117 do { \ | 85 #define VEC_AVX512F_LRSHIFT_32x16(sign) VEC_AVX512F_XRSHIFT(lrshift, 32, 16, sign, l) |
| 118 union v##sign##int64x8_impl_data *vec1d = (union v##sign##int64x8_impl_data *)&vec1; \ | 86 #define VEC_AVX512F_LRSHIFT_64x8(sign) VEC_AVX512F_XRSHIFT(lrshift, 64, 8, sign, l) |
| 119 union v##sign##int64x8_impl_data *vec2d = (union v##sign##int64x8_impl_data *)&vec2; \ | |
| 120 \ | |
| 121 vec1d->avx512f = _mm512_sub_epi64(vec1d->avx512f, vec2d->avx512f); \ | |
| 122 return vec1d->vec; \ | |
| 123 } while (0) | |
| 124 | 87 |
| 125 #define VEC_AVX512F_MUL_8x64(sign) \ | 88 #define VEC_AVX512F_RSHIFT_32x16(sign) VEC_AVX512F_XRSHIFT(rshift, 32, 16, sign, a) |
| 126 VEC_AVX512F_OPERATION_8x64(mullo, sign) | 89 #define VEC_AVX512F_RSHIFT_64x8(sign) VEC_AVX512F_XRSHIFT(rshift, 64, 8, sign, a) |
| 127 | 90 |
| 128 #define VEC_AVX512F_MUL_16x32(sign) \ | 91 #define VEC_AVX512F_uRSHIFT_32x16(sign) VEC_AVX512F_XRSHIFT(rshift, 32, 16, sign, l) |
| 129 VEC_AVX512F_OPERATION_16x32(mullo, sign) | 92 #define VEC_AVX512F_uRSHIFT_64x8(sign) VEC_AVX512F_XRSHIFT(rshift, 64, 8, sign, l) |
| 130 | 93 |
| 131 #define VEC_AVX512F_MUL_32x16(sign) \ | 94 /* ok */ |
| 132 do { \ | 95 #define VEC_AVX512F_STRUCT_OPERATION_32x16(OP, SIGN) v##SIGN##int32x16_avx512f_##OP |
| 133 union v##sign##int32x16_impl_data *vec1d = (union v##sign##int32x16_impl_data *)&vec1; \ | 96 #define VEC_AVX512F_STRUCT_OPERATION_64x8(OP, SIGN) v##SIGN##int64x8_avx512f_##OP |
| 134 union v##sign##int32x16_impl_data *vec2d = (union v##sign##int32x16_impl_data *)&vec2; \ | |
| 135 \ | |
| 136 vec1d->avx512f = _mm512_mullo_epi32(vec1d->avx512f, vec2d->avx512f); \ | |
| 137 return vec1d->vec; \ | |
| 138 } while (0) | |
| 139 | 97 |
| 140 #define VEC_AVX512F_MUL_64x8(sign) \ | 98 /* ------------------------------------------------------------------------ */ |
| 141 do { \ | |
| 142 union v##sign##int64x8_impl_data *vec1d = (union v##sign##int64x8_impl_data *)&vec1; \ | |
| 143 union v##sign##int64x8_impl_data *vec2d = (union v##sign##int64x8_impl_data *)&vec2; \ | |
| 144 \ | |
| 145 __m512i ac = _mm512_mul_epu32(vec1d->avx512f, vec2d->avx512f); \ | |
| 146 __m512i b = _mm512_srli_epi64(vec1d->avx512f, 32); \ | |
| 147 __m512i bc = _mm512_mul_epu32(b, vec2d->avx512f); \ | |
| 148 __m512i d = _mm512_srli_epi64(vec2d->avx512f, 32); \ | |
| 149 __m512i ad = _mm512_mul_epu32(vec1d->avx512f, d); \ | |
| 150 __m512i hi = _mm512_add_epi64(bc, ad); \ | |
| 151 hi = _mm512_slli_epi64(hi, 32); \ | |
| 152 \ | |
| 153 vec1d->avx512f = _mm512_add_epi64(hi, ac); \ | |
| 154 return vec1d->vec; \ | |
| 155 } while (0) | |
| 156 | |
| 157 #define VEC_AVX512F_LSHIFT_8x64(sign) \ | |
| 158 VEC_AVX512F_OPERATION_8x64(sllv, sign) | |
| 159 | |
| 160 #define VEC_AVX512F_LSHIFT_16x32(sign) \ | |
| 161 VEC_AVX512F_OPERATION_16x32(sllv, sign) | |
| 162 | |
| 163 #define VEC_AVX512F_LSHIFT_32x16(sign) \ | |
| 164 do { \ | |
| 165 union v##sign##int32x16_impl_data *vec1d = (union v##sign##int32x16_impl_data *)&vec1; \ | |
| 166 union v##sign##int32x16_impl_data *vec2d = (union v##sign##int32x16_impl_data *)&vec2; \ | |
| 167 \ | |
| 168 vec1d->avx512f = _mm512_sllv_epi32(vec1d->avx512f, vec2d->avx512f); \ | |
| 169 return vec1d->vec; \ | |
| 170 } while (0) | |
| 171 | |
| 172 #define VEC_AVX512F_LSHIFT_64x8(sign) \ | |
| 173 do { \ | |
| 174 union v##sign##int64x8_impl_data *vec1d = (union v##sign##int64x8_impl_data *)&vec1; \ | |
| 175 union v##sign##int64x8_impl_data *vec2d = (union v##sign##int64x8_impl_data *)&vec2; \ | |
| 176 \ | |
| 177 vec1d->avx512f = _mm512_sllv_epi64(vec1d->avx512f, vec2d->avx512f); \ | |
| 178 return vec1d->vec; \ | |
| 179 } while (0) | |
| 180 | |
| 181 #define VEC_AVX512F_lRSHIFT_8x64(sign) \ | |
| 182 VEC_AVX512F_OPERATION_8x64(srlv, sign) | |
| 183 | |
| 184 #define VEC_AVX512F_lRSHIFT_16x32(sign) \ | |
| 185 VEC_AVX512F_OPERATION_16x32(srlv, sign) | |
| 186 | |
| 187 #define VEC_AVX512F_aRSHIFT_8x64(sign) \ | |
| 188 do { \ | |
| 189 return v##sign##int8x64_generic_rshift(vec1, vec2); \ | |
| 190 } while (0) | |
| 191 | |
| 192 #define VEC_AVX512F_aRSHIFT_16x32(sign) \ | |
| 193 do { \ | |
| 194 return v##sign##int16x32_generic_rshift(vec1, vec2); \ | |
| 195 } while (0) | |
| 196 | |
| 197 #define VEC_AVX512F_RSHIFT_8x64(sign, aORl) VEC_AVX512F_##aORl##RSHIFT_8x64(sign) | |
| 198 #define VEC_AVX512F_RSHIFT_16x32(sign, aORl) VEC_AVX512F_##aORl##RSHIFT_16x32(sign) | |
| 199 | |
| 200 #define VEC_AVX512F_RSHIFT_32x16(sign, aORl) \ | |
| 201 do { \ | |
| 202 union v##sign##int32x16_impl_data *vec1d = (union v##sign##int32x16_impl_data *)&vec1; \ | |
| 203 union v##sign##int32x16_impl_data *vec2d = (union v##sign##int32x16_impl_data *)&vec2; \ | |
| 204 \ | |
| 205 vec1d->avx512f = _mm512_sr##aORl##v_epi32(vec1d->avx512f, vec2d->avx512f); \ | |
| 206 return vec1d->vec; \ | |
| 207 } while (0) | |
| 208 | |
| 209 #define VEC_AVX512F_RSHIFT_64x8(sign, aORl) \ | |
| 210 do { \ | |
| 211 union v##sign##int64x8_impl_data *vec1d = (union v##sign##int64x8_impl_data *)&vec1; \ | |
| 212 union v##sign##int64x8_impl_data *vec2d = (union v##sign##int64x8_impl_data *)&vec2; \ | |
| 213 \ | |
| 214 vec1d->avx512f = _mm512_sr##aORl##v_epi64(vec1d->avx512f, vec2d->avx512f); \ | |
| 215 return vec1d->vec; \ | |
| 216 } while (0) | |
| 217 | |
| 218 #define VEC_AVX512F_uRSHIFT_8x64(sign, aORl) VEC_AVX512F_RSHIFT_8x64(sign, l) | |
| 219 #define VEC_AVX512F_uRSHIFT_16x32(sign, aORl) VEC_AVX512F_RSHIFT_16x32(sign, l) | |
| 220 #define VEC_AVX512F_uRSHIFT_32x16(sign, aORl) VEC_AVX512F_RSHIFT_32x16(sign, l) | |
| 221 #define VEC_AVX512F_uRSHIFT_64x8(sign, aORl) VEC_AVX512F_RSHIFT_64x8(sign, l) | |
| 222 | 99 |
| 223 #define VEC_AVX512F_DEFINE_OPERATIONS_SIGN(sign, bits, size) \ | 100 #define VEC_AVX512F_DEFINE_OPERATIONS_SIGN(sign, bits, size) \ |
| 224 union v##sign##int##bits##x##size##_impl_data { \ | 101 union v##sign##int##bits##x##size##_impl_data { \ |
| 225 v##sign##int##bits##x##size vec; \ | 102 v##sign##int##bits##x##size vec; \ |
| 226 __m512i avx512f; \ | 103 __m512i avx512f; \ |
| 227 }; \ | 104 }; \ |
| 228 \ | 105 \ |
| 229 VEC_STATIC_ASSERT(VEC_ALIGNOF(__m512i) <= VEC_ALIGNOF(v##sign##int##bits##x##size), "vec: v" #sign "int" #bits "x" #size " alignment needs to be expanded to fit intrinsic type size"); \ | 106 VEC_STATIC_ASSERT(VEC_ALIGNOF(__m512i) <= VEC_ALIGNOF(v##sign##int##bits##x##size), "vec: v" #sign "int" #bits "x" #size " alignment needs to be expanded to fit intrinsic type size"); \ |
| 230 VEC_STATIC_ASSERT(sizeof(__m512i) <= sizeof(v##sign##int##bits##x##size), "vec: v" #sign "int" #bits "x" #size " needs to be expanded to fit intrinsic type size"); \ | 107 VEC_STATIC_ASSERT(sizeof(__m512i) <= sizeof(v##sign##int##bits##x##size), "vec: v" #sign "int" #bits "x" #size " needs to be expanded to fit intrinsic type size"); \ |
| 231 \ | 108 \ |
| 232 static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_load_aligned(const vec_##sign##int##bits in[size]) \ | 109 VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_load_aligned(const vec_##sign##int##bits in[size]) \ |
| 233 { \ | 110 { \ |
| 234 union v##sign##int##bits##x##size##_impl_data vec; \ | 111 union v##sign##int##bits##x##size##_impl_data vec; \ |
| 235 vec.avx512f = _mm512_load_si512((const __m512i *)in); \ | 112 vec.avx512f = _mm512_load_si512((const __m512i *)in); \ |
| 236 return vec.vec; \ | 113 return vec.vec; \ |
| 237 } \ | 114 } \ |
| 238 \ | 115 \ |
| 239 static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_load(const vec_##sign##int##bits in[size]) \ | 116 VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_load(const vec_##sign##int##bits in[size]) \ |
| 240 { \ | 117 { \ |
| 241 union v##sign##int##bits##x##size##_impl_data vec; \ | 118 union v##sign##int##bits##x##size##_impl_data vec; \ |
| 242 vec.avx512f = _mm512_loadu_si512((const __m512i *)in); \ | 119 vec.avx512f = _mm512_loadu_si512((const __m512i *)in); \ |
| 243 return vec.vec; \ | 120 return vec.vec; \ |
| 244 } \ | 121 } \ |
| 245 \ | 122 \ |
| 246 static void v##sign##int##bits##x##size##_avx512f_store_aligned(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \ | 123 VEC_FUNC_IMPL void v##sign##int##bits##x##size##_avx512f_store_aligned(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \ |
| 247 { \ | 124 { \ |
| 248 _mm512_store_si512((__m512i *)out, ((union v##sign##int##bits##x##size##_impl_data *)&vec)->avx512f); \ | 125 _mm512_store_si512((__m512i *)out, ((union v##sign##int##bits##x##size##_impl_data *)&vec)->avx512f); \ |
| 249 } \ | 126 } \ |
| 250 \ | 127 \ |
| 251 static void v##sign##int##bits##x##size##_avx512f_store(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \ | 128 VEC_FUNC_IMPL void v##sign##int##bits##x##size##_avx512f_store(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \ |
| 252 { \ | 129 { \ |
| 253 _mm512_storeu_si512((__m512i *)out, ((union v##sign##int##bits##x##size##_impl_data *)&vec)->avx512f); \ | 130 _mm512_storeu_si512((__m512i *)out, ((union v##sign##int##bits##x##size##_impl_data *)&vec)->avx512f); \ |
| 254 } \ | 131 } \ |
| 255 \ | 132 \ |
| 256 static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_add(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \ | 133 VEC_AVX512F_ADD_##bits##x##size(sign) \ |
| 257 { \ | 134 VEC_AVX512F_SUB_##bits##x##size(sign) \ |
| 258 VEC_AVX512F_ADD_##bits##x##size(sign); \ | 135 VEC_AVX512F_MUL_##bits##x##size(sign) \ |
| 259 } \ | |
| 260 \ | 136 \ |
| 261 static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_sub(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \ | 137 VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_and(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \ |
| 262 { \ | |
| 263 VEC_AVX512F_SUB_##bits##x##size(sign); \ | |
| 264 } \ | |
| 265 \ | |
| 266 static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_mul(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \ | |
| 267 { \ | |
| 268 VEC_AVX512F_MUL_##bits##x##size(sign); \ | |
| 269 } \ | |
| 270 \ | |
| 271 static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_and(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \ | |
| 272 { \ | 138 { \ |
| 273 union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \ | 139 union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \ |
| 274 union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \ | 140 union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \ |
| 275 \ | 141 \ |
| 276 vec1d->avx512f = _mm512_and_si512(vec1d->avx512f, vec2d->avx512f); \ | 142 vec1d->avx512f = _mm512_and_si512(vec1d->avx512f, vec2d->avx512f); \ |
| 277 return vec1d->vec; \ | 143 return vec1d->vec; \ |
| 278 } \ | 144 } \ |
| 279 \ | 145 \ |
| 280 static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_or(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \ | 146 VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_or(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \ |
| 281 { \ | 147 { \ |
| 282 union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \ | 148 union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \ |
| 283 union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \ | 149 union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \ |
| 284 \ | 150 \ |
| 285 vec1d->avx512f = _mm512_or_si512(vec1d->avx512f, vec2d->avx512f); \ | 151 vec1d->avx512f = _mm512_or_si512(vec1d->avx512f, vec2d->avx512f); \ |
| 286 return vec1d->vec; \ | 152 return vec1d->vec; \ |
| 287 } \ | 153 } \ |
| 288 \ | 154 \ |
| 289 static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_xor(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \ | 155 VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_xor(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \ |
| 290 { \ | 156 { \ |
| 291 union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \ | 157 union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \ |
| 292 union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \ | 158 union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \ |
| 293 \ | 159 \ |
| 294 vec1d->avx512f = _mm512_xor_si512(vec1d->avx512f, vec2d->avx512f); \ | 160 vec1d->avx512f = _mm512_xor_si512(vec1d->avx512f, vec2d->avx512f); \ |
| 295 return vec1d->vec; \ | 161 return vec1d->vec; \ |
| 296 } \ | 162 } \ |
| 297 \ | 163 \ |
| 298 static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_lshift(v##sign##int##bits##x##size vec1, vuint##bits##x##size vec2) \ | 164 VEC_AVX512F_LSHIFT_##bits##x##size(sign); \ |
| 299 { \ | 165 VEC_AVX512F_##sign##RSHIFT_##bits##x##size(sign); \ |
| 300 VEC_AVX512F_LSHIFT_##bits##x##size(sign); \ | 166 VEC_AVX512F_LRSHIFT_##bits##x##size(sign); \ |
| 301 } \ | |
| 302 \ | 167 \ |
| 303 static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_rshift(v##sign##int##bits##x##size vec1, vuint##bits##x##size vec2) \ | 168 VEC_AVX512F_MINMAX_##sign##bits##x##size(min) \ |
| 304 { \ | 169 VEC_AVX512F_MINMAX_##sign##bits##x##size(max) \ |
| 305 VEC_AVX512F_##sign##RSHIFT_##bits##x##size(sign, a); \ | |
| 306 } \ | |
| 307 \ | |
| 308 static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_lrshift(v##sign##int##bits##x##size vec1, vuint##bits##x##size vec2) \ | |
| 309 { \ | |
| 310 VEC_AVX512F_RSHIFT_##bits##x##size(sign, l); \ | |
| 311 } \ | |
| 312 \ | 170 \ |
| 313 const v##sign##int##bits##x##size##_impl v##sign##int##bits##x##size##_impl_avx512f = { \ | 171 const v##sign##int##bits##x##size##_impl v##sign##int##bits##x##size##_impl_avx512f = { \ |
| 314 v##sign##int##bits##x##size##_generic_splat, \ | 172 .load_aligned = v##sign##int##bits##x##size##_avx512f_load_aligned, \ |
| 315 v##sign##int##bits##x##size##_avx512f_load_aligned, \ | 173 .load = v##sign##int##bits##x##size##_avx512f_load, \ |
| 316 v##sign##int##bits##x##size##_avx512f_load, \ | 174 .store_aligned = v##sign##int##bits##x##size##_avx512f_store_aligned, \ |
| 317 v##sign##int##bits##x##size##_avx512f_store_aligned, \ | 175 .store = v##sign##int##bits##x##size##_avx512f_store, \ |
| 318 v##sign##int##bits##x##size##_avx512f_store, \ | 176 .add = VEC_AVX512F_STRUCT_OPERATION_##bits##x##size(add, sign), \ |
| 319 v##sign##int##bits##x##size##_avx512f_add, \ | 177 .sub = VEC_AVX512F_STRUCT_OPERATION_##bits##x##size(sub, sign), \ |
| 320 v##sign##int##bits##x##size##_avx512f_sub, \ | 178 .mul = VEC_AVX512F_STRUCT_OPERATION_##bits##x##size(mul, sign), \ |
| 321 v##sign##int##bits##x##size##_avx512f_mul, \ | 179 .band = v##sign##int##bits##x##size##_avx512f_and, \ |
| 322 v##sign##int##bits##x##size##_generic_div, \ | 180 .bor = v##sign##int##bits##x##size##_avx512f_or, \ |
| 323 v##sign##int##bits##x##size##_generic_avg, \ | 181 .bxor = v##sign##int##bits##x##size##_avx512f_xor, \ |
| 324 v##sign##int##bits##x##size##_avx512f_and, \ | 182 .lshift = VEC_AVX512F_STRUCT_OPERATION_##bits##x##size(lshift, sign), \ |
| 325 v##sign##int##bits##x##size##_avx512f_or, \ | 183 .rshift = VEC_AVX512F_STRUCT_OPERATION_##bits##x##size(rshift, sign), \ |
| 326 v##sign##int##bits##x##size##_avx512f_xor, \ | 184 .lrshift = VEC_AVX512F_STRUCT_OPERATION_##bits##x##size(lrshift, sign), \ |
| 327 v##sign##int##bits##x##size##_generic_not, \ | 185 .min = VEC_AVX512F_STRUCT_MINMAX_##bits##x##size(min, sign), \ |
| 328 v##sign##int##bits##x##size##_avx512f_lshift, \ | 186 .max = VEC_AVX512F_STRUCT_MINMAX_##bits##x##size(max, sign), \ |
| 329 v##sign##int##bits##x##size##_avx512f_rshift, \ | |
| 330 v##sign##int##bits##x##size##_avx512f_lrshift, \ | |
| 331 v##sign##int##bits##x##size##_generic_cmplt, \ | |
| 332 v##sign##int##bits##x##size##_generic_cmple, \ | |
| 333 v##sign##int##bits##x##size##_generic_cmpeq, \ | |
| 334 v##sign##int##bits##x##size##_generic_cmpge, \ | |
| 335 v##sign##int##bits##x##size##_generic_cmpgt, \ | |
| 336 }; | 187 }; |
| 337 | 188 |
| 338 #define VEC_AVX512F_DEFINE_OPERATIONS(bits, size) \ | 189 #define VEC_AVX512F_DEFINE_OPERATIONS(bits, size) \ |
| 339 VEC_AVX512F_DEFINE_OPERATIONS_SIGN(u, bits, size) \ | 190 VEC_AVX512F_DEFINE_OPERATIONS_SIGN(u, bits, size) \ |
| 340 VEC_AVX512F_DEFINE_OPERATIONS_SIGN( , bits, size) | 191 VEC_AVX512F_DEFINE_OPERATIONS_SIGN( , bits, size) |
| 341 | 192 |
| 342 VEC_AVX512F_DEFINE_OPERATIONS(8, 64) | |
| 343 VEC_AVX512F_DEFINE_OPERATIONS(16, 32) | |
| 344 VEC_AVX512F_DEFINE_OPERATIONS(32, 16) | 193 VEC_AVX512F_DEFINE_OPERATIONS(32, 16) |
| 345 VEC_AVX512F_DEFINE_OPERATIONS(64, 8) | 194 VEC_AVX512F_DEFINE_OPERATIONS(64, 8) |
