diff src/impl/x86/avx512f.c @ 31:bf6ad516f1e6
Backed out changeset c6c99ab1088a
| author | Paper <paper@tflc.us> |
| --- | --- |
| date | Fri, 25 Apr 2025 17:40:33 -0400 |
| parents | c6c99ab1088a |
| children | 99e4539f922f |
line diff
--- a/src/impl/x86/avx512f.c	Fri Apr 25 17:40:30 2025 -0400
+++ b/src/impl/x86/avx512f.c	Fri Apr 25 17:40:33 2025 -0400
@@ -23,79 +23,202 @@
 **/
 
 #include "vec/impl/x86/avx512f.h"
+#include "vec/impl/generic.h"
 
 #include <immintrin.h>
 
-/* ------------------------------------------------------------------------ */
-
-#define VEC_AVX512F_MINMAX_TEMPLATE(SIGN, BITS, SIZE, INTLSIGN, OP) \
-	VEC_FUNC_IMPL v##SIGN##int##BITS##x##SIZE v##SIGN##int##BITS##x##SIZE##_avx512f_##OP(v##SIGN##int##BITS##x##SIZE vec1, v##SIGN##int##BITS##x##SIZE vec2) \
-	{ \
-		union v##SIGN##int##BITS##x##SIZE##_impl_data *vec1d = (union v##SIGN##int##BITS##x##SIZE##_impl_data *)&vec1; \
-		union v##SIGN##int##BITS##x##SIZE##_impl_data *vec2d = (union v##SIGN##int##BITS##x##SIZE##_impl_data *)&vec2; \
-	\
-		vec1d->avx512f = _mm512_##OP##_ep##INTLSIGN##BITS(vec1d->avx512f, vec2d->avx512f); \
+// this is a stupid amount of work just to do these operations, is it really worth it ?
+// also same note in avx2.c applies here, these do not handle sign bits properly, which
+// isn't that big of a deal for regular arithmetic operations, but matters quite a bit
+// when doing things like arithmetic shifts.
+#define VEC_AVX512F_OPERATION_8x64(op, sign) \
+	do { \
+		union v##sign##int8x64_impl_data *vec1d = (union v##sign##int8x64_impl_data *)&vec1; \
+		union v##sign##int8x64_impl_data *vec2d = (union v##sign##int8x64_impl_data *)&vec2; \
 	\
-		return vec1d->vec; \
-	}
-
-#define VEC_AVX512F_MINMAX_32x16(OP) VEC_AVX512F_MINMAX_TEMPLATE( , 32, 16, i, OP)
-#define VEC_AVX512F_MINMAX_u32x16(OP) VEC_AVX512F_MINMAX_TEMPLATE(u, 32, 16, u, OP)
-#define VEC_AVX512F_MINMAX_64x8(OP) VEC_AVX512F_MINMAX_TEMPLATE( , 64, 8, i, OP)
-#define VEC_AVX512F_MINMAX_u64x8(OP) VEC_AVX512F_MINMAX_TEMPLATE(u, 64, 8, u, OP)
-
-#define VEC_AVX512F_STRUCT_MINMAX_32x16(OP, SIGN) v##SIGN##int32x16_avx512f_##OP
-#define VEC_AVX512F_STRUCT_MINMAX_64x8(OP, SIGN) v##SIGN##int64x8_avx512f_##OP
-
-/* ------------------------------------------------------------------------ */
-
-#define VEC_AVX512F_OPERATION_EX(name, op, sign, bits, size, secondsign) \
-	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_##name(v##sign##int##bits##x##size vec1, v##secondsign##int##bits##x##size vec2) \
-	{ \
-		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
-		union v##secondsign##int##bits##x##size##_impl_data *vec2d = (union v##secondsign##int##bits##x##size##_impl_data *)&vec2; \
+		/* unpack and operate */ \
+		__m512i dst_1 = _mm512_##op##_epi32(_mm512_srli_epi32(_mm512_slli_epi32(vec1d->avx512f, 24), 24), _mm512_srli_epi32(_mm512_slli_epi32(vec2d->avx512f, 24), 24)); \
+		__m512i dst_2 = _mm512_##op##_epi32(_mm512_srli_epi32(_mm512_slli_epi32(vec1d->avx512f, 16), 24), _mm512_srli_epi32(_mm512_slli_epi32(vec2d->avx512f, 16), 24)); \
+		__m512i dst_3 = _mm512_##op##_epi32(_mm512_srli_epi32(_mm512_slli_epi32(vec1d->avx512f, 8), 24), _mm512_srli_epi32(_mm512_slli_epi32(vec2d->avx512f, 8), 24)); \
+		__m512i dst_4 = _mm512_##op##_epi32(_mm512_srli_epi32(vec1d->avx512f, 24), _mm512_srli_epi32(vec2d->avx512f, 24)); \
 	\
-		vec1d->avx512f = _mm512_##op##_epi##bits(vec1d->avx512f, vec2d->avx512f); \
+		/* repack */ \
+		vec1d->avx512f = _mm512_or_si512( \
+			_mm512_or_si512( \
+				_mm512_srli_epi32(_mm512_slli_epi32(dst_1, 24), 24), \
+				_mm512_srli_epi32(_mm512_slli_epi32(dst_2, 24), 16) \
+			), \
+			_mm512_or_si512( \
+				_mm512_srli_epi32(_mm512_slli_epi32(dst_3, 24), 8), \
+				_mm512_slli_epi32(dst_4, 24) \
+			) \
+		); \
 	\
 		return vec1d->vec; \
-	}
+	} while (0)
 
-#define VEC_AVX512F_OPERATION(name, op, sign, bits, size) \
-	VEC_AVX512F_OPERATION_EX(name, op, sign, bits, size, sign)
+#define VEC_AVX512F_OPERATION_16x32(op, sign) \
+	do { \
+		union v##sign##int16x32_impl_data *vec1d = (union v##sign##int16x32_impl_data *)&vec1; \
+		union v##sign##int16x32_impl_data *vec2d = (union v##sign##int16x32_impl_data *)&vec2; \
+	\
+		/* unpack and operate; it would be nice if we had an _m512_andi_epi32... */ \
+		__m512i dst_1 = _mm512_##op##_epi32(_mm512_srli_epi32(_mm512_slli_epi32(vec1d->avx512f, 16), 16), _mm512_srli_epi32(_mm512_slli_epi32(vec2d->avx512f, 16), 16)); \
+		__m512i dst_2 = _mm512_##op##_epi32(_mm512_srli_epi32(vec1d->avx512f, 16), _mm512_srli_epi32(vec2d->avx512f, 16)); \
+	\
+		/* repack */ \
+		vec1d->avx512f = _mm512_or_si512( \
+			_mm512_srli_epi32(_mm512_slli_epi32(dst_1, 16), 16), \
+			_mm512_slli_epi32(dst_2, 16) \
+		); \
+		return vec1d->vec; \
+	} while (0)
 
-#define VEC_AVX512F_OPERATION_SHIFT(name, op, sign, bits, size) \
-	VEC_AVX512F_OPERATION_EX(name, op, sign, bits, size, u)
+#define VEC_AVX512F_ADD_8x64(sign) \
+	VEC_AVX512F_OPERATION_8x64(add, sign)
+
+#define VEC_AVX512F_ADD_16x32(sign) \
+	VEC_AVX512F_OPERATION_16x32(add, sign)
+
+#define VEC_AVX512F_ADD_32x16(sign) \
+	do { \
+		union v##sign##int32x16_impl_data *vec1d = (union v##sign##int32x16_impl_data *)&vec1; \
+		union v##sign##int32x16_impl_data *vec2d = (union v##sign##int32x16_impl_data *)&vec2; \
+	\
+		vec1d->avx512f = _mm512_add_epi32(vec1d->avx512f, vec2d->avx512f); \
+		return vec1d->vec; \
+	} while (0)
 
-#define VEC_AVX512F_ADD_32x16(sign) VEC_AVX512F_OPERATION(add, add, sign, 32, 16)
-#define VEC_AVX512F_ADD_64x8(sign) VEC_AVX512F_OPERATION(add, add, sign, 64, 8)
+#define VEC_AVX512F_ADD_64x8(sign) \
+	do { \
+		union v##sign##int64x8_impl_data *vec1d = (union v##sign##int64x8_impl_data *)&vec1; \
+		union v##sign##int64x8_impl_data *vec2d = (union v##sign##int64x8_impl_data *)&vec2; \
+	\
+		vec1d->avx512f = _mm512_add_epi64(vec1d->avx512f, vec2d->avx512f); \
+		return vec1d->vec; \
+	} while (0)
+
+#define VEC_AVX512F_SUB_8x64(sign) \
+	VEC_AVX512F_OPERATION_8x64(sub, sign)
+
+#define VEC_AVX512F_SUB_16x32(sign) \
+	VEC_AVX512F_OPERATION_16x32(sub, sign)
+
+#define VEC_AVX512F_SUB_32x16(sign) \
+	do { \
+		union v##sign##int32x16_impl_data *vec1d = (union v##sign##int32x16_impl_data *)&vec1; \
+		union v##sign##int32x16_impl_data *vec2d = (union v##sign##int32x16_impl_data *)&vec2; \
+	\
+		vec1d->avx512f = _mm512_sub_epi32(vec1d->avx512f, vec2d->avx512f); \
+		return vec1d->vec; \
+	} while (0)
 
-#define VEC_AVX512F_SUB_32x16(sign) VEC_AVX512F_OPERATION(sub, sub, sign, 32, 16)
-#define VEC_AVX512F_SUB_64x8(sign) VEC_AVX512F_OPERATION(sub, sub, sign, 64, 8)
+#define VEC_AVX512F_SUB_64x8(sign) \
+	do { \
+		union v##sign##int64x8_impl_data *vec1d = (union v##sign##int64x8_impl_data *)&vec1; \
+		union v##sign##int64x8_impl_data *vec2d = (union v##sign##int64x8_impl_data *)&vec2; \
+	\
+		vec1d->avx512f = _mm512_sub_epi64(vec1d->avx512f, vec2d->avx512f); \
+		return vec1d->vec; \
+	} while (0)
+
+#define VEC_AVX512F_MUL_8x64(sign) \
+	VEC_AVX512F_OPERATION_8x64(mullo, sign)
 
-#define VEC_AVX512F_MUL_32x16(sign) VEC_AVX512F_OPERATION(mul, mullo, sign, 32, 16)
-#define VEC_AVX512F_MUL_64x8(sign) VEC_AVX512F_OPERATION(mul, mullox, sign, 64, 8)
+#define VEC_AVX512F_MUL_16x32(sign) \
+	VEC_AVX512F_OPERATION_16x32(mullo, sign)
+
+#define VEC_AVX512F_MUL_32x16(sign) \
+	do { \
+		union v##sign##int32x16_impl_data *vec1d = (union v##sign##int32x16_impl_data *)&vec1; \
+		union v##sign##int32x16_impl_data *vec2d = (union v##sign##int32x16_impl_data *)&vec2; \
+	\
+		vec1d->avx512f = _mm512_mullo_epi32(vec1d->avx512f, vec2d->avx512f); \
+		return vec1d->vec; \
+	} while (0)
 
-#define VEC_AVX512F_LSHIFT_32x16(sign) VEC_AVX512F_OPERATION_SHIFT(lshift, sllv, sign, 32, 16)
-#define VEC_AVX512F_LSHIFT_64x8(sign) VEC_AVX512F_OPERATION_SHIFT(lshift, sllv, sign, 64, 8)
+#define VEC_AVX512F_MUL_64x8(sign) \
+	do { \
+		union v##sign##int64x8_impl_data *vec1d = (union v##sign##int64x8_impl_data *)&vec1; \
+		union v##sign##int64x8_impl_data *vec2d = (union v##sign##int64x8_impl_data *)&vec2; \
+	\
+		__m512i ac = _mm512_mul_epu32(vec1d->avx512f, vec2d->avx512f); \
+		__m512i b = _mm512_srli_epi64(vec1d->avx512f, 32); \
+		__m512i bc = _mm512_mul_epu32(b, vec2d->avx512f); \
+		__m512i d = _mm512_srli_epi64(vec2d->avx512f, 32); \
+		__m512i ad = _mm512_mul_epu32(vec1d->avx512f, d); \
+		__m512i hi = _mm512_add_epi64(bc, ad); \
+		hi = _mm512_slli_epi64(hi, 32); \
+	\
+		vec1d->avx512f = _mm512_add_epi64(hi, ac); \
+		return vec1d->vec; \
+	} while (0)
+
+#define VEC_AVX512F_LSHIFT_8x64(sign) \
+	VEC_AVX512F_OPERATION_8x64(sllv, sign)
 
-#define VEC_AVX512F_XRSHIFT(name, bits, size, sign, aORl) \
-	VEC_AVX512F_OPERATION_SHIFT(name, sr##aORl##v, sign, bits, size)
+#define VEC_AVX512F_LSHIFT_16x32(sign) \
+	VEC_AVX512F_OPERATION_16x32(sllv, sign)
 
-/* always the same, regardless of signedness */
-#define VEC_AVX512F_LRSHIFT_32x16(sign) VEC_AVX512F_XRSHIFT(lrshift, 32, 16, sign, l)
-#define VEC_AVX512F_LRSHIFT_64x8(sign) VEC_AVX512F_XRSHIFT(lrshift, 64, 8, sign, l)
+#define VEC_AVX512F_LSHIFT_32x16(sign) \
+	do { \
+		union v##sign##int32x16_impl_data *vec1d = (union v##sign##int32x16_impl_data *)&vec1; \
+		union v##sign##int32x16_impl_data *vec2d = (union v##sign##int32x16_impl_data *)&vec2; \
+	\
+		vec1d->avx512f = _mm512_sllv_epi32(vec1d->avx512f, vec2d->avx512f); \
+		return vec1d->vec; \
+	} while (0)
+
+#define VEC_AVX512F_LSHIFT_64x8(sign) \
+	do { \
+		union v##sign##int64x8_impl_data *vec1d = (union v##sign##int64x8_impl_data *)&vec1; \
+		union v##sign##int64x8_impl_data *vec2d = (union v##sign##int64x8_impl_data *)&vec2; \
+	\
+		vec1d->avx512f = _mm512_sllv_epi64(vec1d->avx512f, vec2d->avx512f); \
+		return vec1d->vec; \
+	} while (0)
 
-#define VEC_AVX512F_RSHIFT_32x16(sign) VEC_AVX512F_XRSHIFT(rshift, 32, 16, sign, a)
-#define VEC_AVX512F_RSHIFT_64x8(sign) VEC_AVX512F_XRSHIFT(rshift, 64, 8, sign, a)
+#define VEC_AVX512F_lRSHIFT_8x64(sign) \
+	VEC_AVX512F_OPERATION_8x64(srlv, sign)
+
+#define VEC_AVX512F_lRSHIFT_16x32(sign) \
+	VEC_AVX512F_OPERATION_16x32(srlv, sign)
 
-#define VEC_AVX512F_uRSHIFT_32x16(sign) VEC_AVX512F_XRSHIFT(rshift, 32, 16, sign, l)
-#define VEC_AVX512F_uRSHIFT_64x8(sign) VEC_AVX512F_XRSHIFT(rshift, 64, 8, sign, l)
+#define VEC_AVX512F_aRSHIFT_8x64(sign) \
+	do { \
+		return v##sign##int8x64_generic_rshift(vec1, vec2); \
+	} while (0)
+
+#define VEC_AVX512F_aRSHIFT_16x32(sign) \
+	do { \
+		return v##sign##int16x32_generic_rshift(vec1, vec2); \
+	} while (0)
+
+#define VEC_AVX512F_RSHIFT_8x64(sign, aORl) VEC_AVX512F_##aORl##RSHIFT_8x64(sign)
+#define VEC_AVX512F_RSHIFT_16x32(sign, aORl) VEC_AVX512F_##aORl##RSHIFT_16x32(sign)
 
-/* ok */
-#define VEC_AVX512F_STRUCT_OPERATION_32x16(OP, SIGN) v##SIGN##int32x16_avx512f_##OP
-#define VEC_AVX512F_STRUCT_OPERATION_64x8(OP, SIGN) v##SIGN##int64x8_avx512f_##OP
+#define VEC_AVX512F_RSHIFT_32x16(sign, aORl) \
+	do { \
+		union v##sign##int32x16_impl_data *vec1d = (union v##sign##int32x16_impl_data *)&vec1; \
+		union v##sign##int32x16_impl_data *vec2d = (union v##sign##int32x16_impl_data *)&vec2; \
+	\
+		vec1d->avx512f = _mm512_sr##aORl##v_epi32(vec1d->avx512f, vec2d->avx512f); \
+		return vec1d->vec; \
+	} while (0)
 
-/* ------------------------------------------------------------------------ */
+#define VEC_AVX512F_RSHIFT_64x8(sign, aORl) \
+	do { \
+		union v##sign##int64x8_impl_data *vec1d = (union v##sign##int64x8_impl_data *)&vec1; \
+		union v##sign##int64x8_impl_data *vec2d = (union v##sign##int64x8_impl_data *)&vec2; \
+	\
+		vec1d->avx512f = _mm512_sr##aORl##v_epi64(vec1d->avx512f, vec2d->avx512f); \
+		return vec1d->vec; \
+	} while (0)
+
+#define VEC_AVX512F_uRSHIFT_8x64(sign, aORl) VEC_AVX512F_RSHIFT_8x64(sign, l)
+#define VEC_AVX512F_uRSHIFT_16x32(sign, aORl) VEC_AVX512F_RSHIFT_16x32(sign, l)
+#define VEC_AVX512F_uRSHIFT_32x16(sign, aORl) VEC_AVX512F_RSHIFT_32x16(sign, l)
+#define VEC_AVX512F_uRSHIFT_64x8(sign, aORl) VEC_AVX512F_RSHIFT_64x8(sign, l)
 
 #define VEC_AVX512F_DEFINE_OPERATIONS_SIGN(sign, bits, size) \
 	union v##sign##int##bits##x##size##_impl_data { \
@@ -106,35 +229,46 @@
 	VEC_STATIC_ASSERT(VEC_ALIGNOF(__m512i) <= VEC_ALIGNOF(v##sign##int##bits##x##size), "vec: v" #sign "int" #bits "x" #size " alignment needs to be expanded to fit intrinsic type size"); \
 	VEC_STATIC_ASSERT(sizeof(__m512i) <= sizeof(v##sign##int##bits##x##size), "vec: v" #sign "int" #bits "x" #size " needs to be expanded to fit intrinsic type size"); \
 	\
-	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_load_aligned(const vec_##sign##int##bits in[size]) \
+	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_load_aligned(const vec_##sign##int##bits in[size]) \
 	{ \
 		union v##sign##int##bits##x##size##_impl_data vec; \
 		vec.avx512f = _mm512_load_si512((const __m512i *)in); \
 		return vec.vec; \
 	} \
 	\
-	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_load(const vec_##sign##int##bits in[size]) \
+	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_load(const vec_##sign##int##bits in[size]) \
	{ \
 		union v##sign##int##bits##x##size##_impl_data vec; \
 		vec.avx512f = _mm512_loadu_si512((const __m512i *)in); \
 		return vec.vec; \
 	} \
 	\
-	VEC_FUNC_IMPL void v##sign##int##bits##x##size##_avx512f_store_aligned(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
+	static void v##sign##int##bits##x##size##_avx512f_store_aligned(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
 	{ \
 		_mm512_store_si512((__m512i *)out, ((union v##sign##int##bits##x##size##_impl_data *)&vec)->avx512f); \
 	} \
 	\
-	VEC_FUNC_IMPL void v##sign##int##bits##x##size##_avx512f_store(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
+	static void v##sign##int##bits##x##size##_avx512f_store(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
 	{ \
 		_mm512_storeu_si512((__m512i *)out, ((union v##sign##int##bits##x##size##_impl_data *)&vec)->avx512f); \
 	} \
 	\
-	VEC_AVX512F_ADD_##bits##x##size(sign) \
-	VEC_AVX512F_SUB_##bits##x##size(sign) \
-	VEC_AVX512F_MUL_##bits##x##size(sign) \
+	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_add(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
+	{ \
+		VEC_AVX512F_ADD_##bits##x##size(sign); \
+	} \
 	\
-	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_and(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
+	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_sub(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
+	{ \
+		VEC_AVX512F_SUB_##bits##x##size(sign); \
+	} \
+	\
+	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_mul(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
+	{ \
+		VEC_AVX512F_MUL_##bits##x##size(sign); \
+	} \
+	\
+	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_and(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
 	{ \
 		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
 		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
@@ -143,7 +277,7 @@
 		return vec1d->vec; \
 	} \
 	\
-	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_or(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
+	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_or(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
 	{ \
 		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
 		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
@@ -152,7 +286,7 @@
 		return vec1d->vec; \
 	} \
 	\
-	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_xor(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
+	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_xor(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
 	{ \
 		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
 		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
@@ -161,34 +295,51 @@
 		return vec1d->vec; \
 	} \
 	\
-	VEC_AVX512F_LSHIFT_##bits##x##size(sign); \
-	VEC_AVX512F_##sign##RSHIFT_##bits##x##size(sign); \
-	VEC_AVX512F_LRSHIFT_##bits##x##size(sign); \
+	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_lshift(v##sign##int##bits##x##size vec1, vuint##bits##x##size vec2) \
+	{ \
+		VEC_AVX512F_LSHIFT_##bits##x##size(sign); \
+	} \
 	\
-	VEC_AVX512F_MINMAX_##sign##bits##x##size(min) \
-	VEC_AVX512F_MINMAX_##sign##bits##x##size(max) \
+	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_rshift(v##sign##int##bits##x##size vec1, vuint##bits##x##size vec2) \
+	{ \
+		VEC_AVX512F_##sign##RSHIFT_##bits##x##size(sign, a); \
+	} \
+	\
+	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx512f_lrshift(v##sign##int##bits##x##size vec1, vuint##bits##x##size vec2) \
+	{ \
+		VEC_AVX512F_RSHIFT_##bits##x##size(sign, l); \
+	} \
 	\
 	const v##sign##int##bits##x##size##_impl v##sign##int##bits##x##size##_impl_avx512f = { \
-		.load_aligned = v##sign##int##bits##x##size##_avx512f_load_aligned, \
-		.load = v##sign##int##bits##x##size##_avx512f_load, \
-		.store_aligned = v##sign##int##bits##x##size##_avx512f_store_aligned, \
-		.store = v##sign##int##bits##x##size##_avx512f_store, \
-		.add = VEC_AVX512F_STRUCT_OPERATION_##bits##x##size(add, sign), \
-		.sub = VEC_AVX512F_STRUCT_OPERATION_##bits##x##size(sub, sign), \
-		.mul = VEC_AVX512F_STRUCT_OPERATION_##bits##x##size(mul, sign), \
-		.band = v##sign##int##bits##x##size##_avx512f_and, \
-		.bor = v##sign##int##bits##x##size##_avx512f_or, \
-		.bxor = v##sign##int##bits##x##size##_avx512f_xor, \
-		.lshift = VEC_AVX512F_STRUCT_OPERATION_##bits##x##size(lshift, sign), \
-		.rshift = VEC_AVX512F_STRUCT_OPERATION_##bits##x##size(rshift, sign), \
-		.lrshift = VEC_AVX512F_STRUCT_OPERATION_##bits##x##size(lrshift, sign), \
-		.min = VEC_AVX512F_STRUCT_MINMAX_##bits##x##size(min, sign), \
-		.max = VEC_AVX512F_STRUCT_MINMAX_##bits##x##size(max, sign), \
+		v##sign##int##bits##x##size##_generic_splat, \
+		v##sign##int##bits##x##size##_avx512f_load_aligned, \
+		v##sign##int##bits##x##size##_avx512f_load, \
+		v##sign##int##bits##x##size##_avx512f_store_aligned, \
+		v##sign##int##bits##x##size##_avx512f_store, \
+		v##sign##int##bits##x##size##_avx512f_add, \
+		v##sign##int##bits##x##size##_avx512f_sub, \
+		v##sign##int##bits##x##size##_avx512f_mul, \
+		v##sign##int##bits##x##size##_generic_div, \
+		v##sign##int##bits##x##size##_generic_avg, \
+		v##sign##int##bits##x##size##_avx512f_and, \
+		v##sign##int##bits##x##size##_avx512f_or, \
+		v##sign##int##bits##x##size##_avx512f_xor, \
+		v##sign##int##bits##x##size##_generic_not, \
+		v##sign##int##bits##x##size##_avx512f_lshift, \
+		v##sign##int##bits##x##size##_avx512f_rshift, \
+		v##sign##int##bits##x##size##_avx512f_lrshift, \
+		v##sign##int##bits##x##size##_generic_cmplt, \
+		v##sign##int##bits##x##size##_generic_cmple, \
+		v##sign##int##bits##x##size##_generic_cmpeq, \
+		v##sign##int##bits##x##size##_generic_cmpge, \
+		v##sign##int##bits##x##size##_generic_cmpgt, \
 	};
 
 #define VEC_AVX512F_DEFINE_OPERATIONS(bits, size) \
 	VEC_AVX512F_DEFINE_OPERATIONS_SIGN(u, bits, size) \
 	VEC_AVX512F_DEFINE_OPERATIONS_SIGN( , bits, size)
 
+VEC_AVX512F_DEFINE_OPERATIONS(8, 64)
+VEC_AVX512F_DEFINE_OPERATIONS(16, 32)
 VEC_AVX512F_DEFINE_OPERATIONS(32, 16)
 VEC_AVX512F_DEFINE_OPERATIONS(64, 8)
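The backed-out VEC_AVX512F_OPERATION_8x64 and VEC_AVX512F_OPERATION_16x32 macros emulate 8- and 16-bit lanes with the 32-bit operations plain AVX-512F provides: each narrow lane is isolated inside its 32-bit lane by a shift-left/shift-right pair (standing in for the AND-with-immediate the in-diff comment wishes existed), the operation runs at 32-bit width, and the partial results are shifted back into position and ORed together. As the commit comment notes, lanes are zero- rather than sign-extended, which is also why the arithmetic right shifts fall back to the generic implementation. A minimal scalar sketch of the same trick, with a hypothetical function name and one uint32_t standing in for one 32-bit lane of a __m512i:

#include <stdint.h>
#include <stdio.h>

/* Four 8-bit lanes packed in a uint32_t, added lane-wise using only 32-bit
 * shifts, adds, and ORs, mirroring VEC_AVX512F_OPERATION_8x64(add, ...). */
static uint32_t add_u8_lanes_via_u32(uint32_t a, uint32_t b)
{
	/* unpack and operate: byte k is isolated by (x << (24 - 8*k)) >> 24 */
	uint32_t dst_1 = ((a << 24) >> 24) + ((b << 24) >> 24); /* byte 0 */
	uint32_t dst_2 = ((a << 16) >> 24) + ((b << 16) >> 24); /* byte 1 */
	uint32_t dst_3 = ((a <<  8) >> 24) + ((b <<  8) >> 24); /* byte 2 */
	uint32_t dst_4 = ( a        >> 24) + ( b        >> 24); /* byte 3 */

	/* repack: mask each sum back down to 8 bits and shift it into place,
	 * using the same slli/srli pairs as the repack step in the diff */
	return ((dst_1 << 24) >> 24)
	     | ((dst_2 << 24) >> 16)
	     | ((dst_3 << 24) >>  8)
	     |  (dst_4 << 24);
}

int main(void)
{
	/* 0xFF + 0x01 wraps to 0x00 within its own lane; no carry into byte 1 */
	printf("%08x\n", add_u8_lanes_via_u32(0x010203FFu, 0x01010101u)); /* 02030400 */
	return 0;
}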
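The hand-rolled VEC_AVX512F_MUL_64x8 exists for a similar reason: _mm512_mullo_epi64 requires AVX-512DQ, while the AVX-512F baseline only offers the 32x32->64 widening multiply _mm512_mul_epu32. The macro therefore assembles the low 64 bits of each product from partial products; the high*high term is dropped because it overflows out of the low half entirely, and signedness does not affect the low 64 bits of a two's-complement product. A scalar model of the same decomposition (hypothetical name, one uint64_t standing in for one lane), reusing the variable names from the diff:

#include <stdint.h>
#include <stdio.h>

static uint64_t mullo64_via_mul_epu32(uint64_t x, uint64_t y)
{
	uint64_t ac = (uint32_t)x * (uint64_t)(uint32_t)y; /* lo(x) * lo(y) */
	uint64_t b  = x >> 32;                             /* hi(x) */
	uint64_t bc = (uint32_t)b * (uint64_t)(uint32_t)y; /* hi(x) * lo(y) */
	uint64_t d  = y >> 32;                             /* hi(y) */
	uint64_t ad = (uint32_t)x * (uint64_t)(uint32_t)d; /* lo(x) * hi(y) */
	uint64_t hi = (bc + ad) << 32; /* cross terms land in the high half */
	return hi + ac;                /* hi(x)*hi(y) overflows out of 64 bits */
}

int main(void)
{
	uint64_t x = 0x123456789ABCDEF0u, y = 0x0FEDCBA987654321u;
	printf("%d\n", mullo64_via_mul_epu32(x, y) == x * y); /* prints 1 */
	return 0;
}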