view src/impl/x86/avx2.c @ 28:c6c99ab1088a

*: add min/max functions and a big big refactor (again) agh, this time I added a few more implementations (and generally made the code just a little faster...)
author Paper <paper@tflc.us>
date Thu, 24 Apr 2025 00:54:02 -0400
parents e49e70f7012f
children bf6ad516f1e6
line wrap: on
line source

/**
 * vec - a tiny SIMD vector library in C99
 * 
 * Copyright (c) 2024 Paper
 * 
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * 
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 * 
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
**/

#include "vec/impl/x86/avx2.h"

#include <immintrin.h>

/* ------------------------------------------------------------------------ */
/* min/max */

/*
 * Stamps out one min/max-style function for a single vector type.
 *
 * SIGN      - `u` for unsigned types, empty for signed
 * BITS/SIZE - lane width and lane count of the vector type
 * INTLSIGN  - sign letter in the intrinsic name: `i` (epi) or `u` (epu)
 * OP        - `min`, `max`, or (reused below) `avg`
 *
 * Expands to e.g. vint8x32_avx2_min() wrapping _mm256_min_epi8().
 * Type punning goes through union objects, which C defines.
 */
#define VEC_AVX2_MINMAX_TEMPLATE(SIGN, BITS, SIZE, INTLSIGN, OP) \
	VEC_FUNC_IMPL v##SIGN##int##BITS##x##SIZE v##SIGN##int##BITS##x##SIZE##_avx2_##OP(v##SIGN##int##BITS##x##SIZE vec1, v##SIGN##int##BITS##x##SIZE vec2) \
	{ \
		union v##SIGN##int##BITS##x##SIZE##_impl_data xd, yd; \
	\
		xd.vec = vec1; \
		yd.vec = vec2; \
		xd.avx2 = _mm256_##OP##_ep##INTLSIGN##BITS(xd.avx2, yd.avx2); \
	\
		return xd.vec; \
	}

/* Instantiate min/max for each lane layout.  AVX2 provides packed min/max
 * only for 8-, 16- and 32-bit lanes (epi variants for signed, epu for
 * unsigned); 64-bit packed min/max needs AVX-512, so the 64x4 variants
 * expand to nothing. */
#define VEC_AVX2_MINMAX_8x32(OP)   VEC_AVX2_MINMAX_TEMPLATE( , 8, 32, i, OP)
#define VEC_AVX2_MINMAX_u8x32(OP)  VEC_AVX2_MINMAX_TEMPLATE(u, 8, 32, u, OP)
#define VEC_AVX2_MINMAX_16x16(OP)  VEC_AVX2_MINMAX_TEMPLATE( , 16, 16, i, OP)
#define VEC_AVX2_MINMAX_u16x16(OP) VEC_AVX2_MINMAX_TEMPLATE(u, 16, 16, u, OP)
#define VEC_AVX2_MINMAX_32x8(OP)   VEC_AVX2_MINMAX_TEMPLATE( , 32, 8, i, OP)
#define VEC_AVX2_MINMAX_u32x8(OP)  VEC_AVX2_MINMAX_TEMPLATE(u, 32, 8, u, OP)
#define VEC_AVX2_MINMAX_64x4(OP)   /* nothing */
#define VEC_AVX2_MINMAX_u64x4(OP)  /* nothing */

/* Function-pointer names for the impl struct below; NULL marks an
 * unsupported operation (the 64-bit case generated no function above). */
#define VEC_AVX2_STRUCT_MINMAX_8x32(OP, SIGN)  v##SIGN##int8x32_avx2_##OP
#define VEC_AVX2_STRUCT_MINMAX_16x16(OP, SIGN) v##SIGN##int16x16_avx2_##OP
#define VEC_AVX2_STRUCT_MINMAX_32x8(OP, SIGN)  v##SIGN##int32x8_avx2_##OP
#define VEC_AVX2_STRUCT_MINMAX_64x4(OP, SIGN)  NULL

/* The min/max template above is reused for avg since _mm256_avg_epu* has
 * the same two-operand shape.  AVX2 only provides (rounding) averages for
 * unsigned 8- and 16-bit lanes (vpavgb/vpavgw), so every other layout
 * expands to nothing here and dispatches to NULL below. */

#define VEC_AVX2_AVG_8x32  /* nothing */
#define VEC_AVX2_AVG_16x16 /* nothing */
#define VEC_AVX2_AVG_32x8  /* nothing */
#define VEC_AVX2_AVG_64x4  /* nothing */

#define VEC_AVX2_AVG_u8x32  VEC_AVX2_MINMAX_TEMPLATE(u, 8,  32, u, avg)
#define VEC_AVX2_AVG_u16x16 VEC_AVX2_MINMAX_TEMPLATE(u, 16, 16, u, avg)
#define VEC_AVX2_AVG_u32x8  /* nothing */
#define VEC_AVX2_AVG_u64x4  /* nothing */

/* avg function pointers for the impl struct; only the two layouts that
 * AVX2 supports are non-NULL. */
#define VEC_AVX2_STRUCT_AVG_8x32  NULL
#define VEC_AVX2_STRUCT_AVG_16x16 NULL
#define VEC_AVX2_STRUCT_AVG_32x8  NULL
#define VEC_AVX2_STRUCT_AVG_64x4  NULL

#define VEC_AVX2_STRUCT_AVG_u8x32  vuint8x32_avx2_avg
#define VEC_AVX2_STRUCT_AVG_u16x16 vuint16x16_avx2_avg
#define VEC_AVX2_STRUCT_AVG_u32x8  NULL
#define VEC_AVX2_STRUCT_AVG_u64x4  NULL

/* ------------------------------------------------------------------------ */

// multiplication

/* AVX2 has no 8-bit multiply instruction, so no mul function is generated
 * for the 8x32 layout; VEC_AVX2_STRUCT_MUL_8x32 below resolves to NULL. */
#define VEC_AVX2_MUL_8x32(sign) /* nothing */

/*
 * 16-bit lanewise multiply keeping the low 16 bits of each product.
 * Truncation makes the result identical for signed and unsigned inputs,
 * so one intrinsic serves both instantiations.
 */
#define VEC_AVX2_MUL_16x16(sign) \
	VEC_FUNC_IMPL v##sign##int16x16 v##sign##int16x16_avx2_mul(v##sign##int16x16 vec1, v##sign##int16x16 vec2) \
	{ \
		union v##sign##int16x16_impl_data xd, yd; \
	\
		xd.vec = vec1; \
		yd.vec = vec2; \
		xd.avx2 = _mm256_mullo_epi16(xd.avx2, yd.avx2); \
		return xd.vec; \
	}

/*
 * 32-bit lanewise multiply keeping the low 32 bits of each product.
 * As with the 16-bit case, the truncated result is sign-agnostic.
 */
#define VEC_AVX2_MUL_32x8(sign) \
	VEC_FUNC_IMPL v##sign##int32x8 v##sign##int32x8_avx2_mul(v##sign##int32x8 vec1, v##sign##int32x8 vec2) \
	{ \
		union v##sign##int32x8_impl_data xd, yd; \
	\
		xd.vec = vec1; \
		yd.vec = vec2; \
		xd.avx2 = _mm256_mullo_epi32(xd.avx2, yd.avx2); \
		return xd.vec; \
	}

/*
 * 64-bit multiply built from 32-bit pieces: AVX2 has no 64-bit low
 * multiply (vpmullq needs AVX-512DQ).  With a = 2^32*ah + al and
 * b = 2^32*bh + bl, the low 64 bits of a*b are
 * al*bl + ((ah*bl + al*bh) << 32); the ah*bh term shifts out entirely.
 * _mm256_mul_epu32 multiplies the low 32 bits of each 64-bit lane into a
 * full 64-bit product.  The truncated result is also correct for signed
 * inputs under two's complement.
 */
#define VEC_AVX2_MUL_64x4(sign) \
	VEC_FUNC_IMPL v##sign##int64x4 v##sign##int64x4_avx2_mul(v##sign##int64x4 vec1, v##sign##int64x4 vec2) \
	{ \
		union v##sign##int64x4_impl_data *vec1d = (union v##sign##int64x4_impl_data *)&vec1; \
		union v##sign##int64x4_impl_data *vec2d = (union v##sign##int64x4_impl_data *)&vec2; \
	\
		__m256i ac = _mm256_mul_epu32(vec1d->avx2, vec2d->avx2); /* al*bl */ \
		__m256i b  = _mm256_srli_epi64(vec1d->avx2, 32); /* ah */ \
		__m256i bc = _mm256_mul_epu32(b, vec2d->avx2); /* ah*bl */ \
		__m256i d  = _mm256_srli_epi64(vec2d->avx2, 32); /* bh */ \
		__m256i ad = _mm256_mul_epu32(vec1d->avx2, d); /* al*bh */ \
		__m256i hi = _mm256_add_epi64(bc, ad); /* ah*bl + al*bh */ \
		hi = _mm256_slli_epi64(hi, 32); \
	\
		vec1d->avx2 = _mm256_add_epi64(hi, ac); /* low 64 bits of a*b */ \
		return vec1d->vec; \
	}

/* mul function pointers for the impl struct; 8x32 has no implementation
 * above (no 8-bit multiply in AVX2), so it dispatches to NULL. */
#define VEC_AVX2_STRUCT_MUL_8x32(SIGN) NULL
#define VEC_AVX2_STRUCT_MUL_16x16(SIGN) v##SIGN##int16x16_avx2_mul
#define VEC_AVX2_STRUCT_MUL_32x8(SIGN) v##SIGN##int32x8_avx2_mul
#define VEC_AVX2_STRUCT_MUL_64x4(SIGN) v##SIGN##int64x4_avx2_mul

// operations

#define VEC_AVX2_DEFINE_OPERATIONS_SIGN(sign, bits, size) \
	union v##sign##int##bits##x##size##_impl_data { \
		v##sign##int##bits##x##size vec; \
		__m256i avx2; \
	}; \
	\
	VEC_STATIC_ASSERT(VEC_ALIGNOF(__m256i) <= VEC_ALIGNOF(v##sign##int##bits##x##size), "vec: v" #sign "int" #bits "x" #size " alignment needs to be expanded to fit intrinsic type size"); \
	VEC_STATIC_ASSERT(sizeof(__m256i) <= sizeof(v##sign##int##bits##x##size), "vec: v" #sign "int" #bits "x" #size " needs to be expanded to fit intrinsic type size"); \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_load_aligned(const vec_##sign##int##bits in[size]) \
	{ \
		union v##sign##int##bits##x##size##_impl_data vec; \
		vec.avx2 = _mm256_load_si256((const __m256i *)in); \
		return vec.vec; \
	} \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_load(const vec_##sign##int##bits in[size]) \
	{ \
		union v##sign##int##bits##x##size##_impl_data vec; \
		vec.avx2 = _mm256_loadu_si256((const __m256i *)in); \
		return vec.vec; \
	} \
	\
	VEC_FUNC_IMPL void v##sign##int##bits##x##size##_avx2_store_aligned(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
	{ \
		_mm256_store_si256((__m256i *)out, ((union v##sign##int##bits##x##size##_impl_data*)&vec)->avx2); \
	} \
	\
	VEC_FUNC_IMPL void v##sign##int##bits##x##size##_avx2_store(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
	{ \
		_mm256_storeu_si256((__m256i *)out, ((union v##sign##int##bits##x##size##_impl_data*)&vec)->avx2); \
	} \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_add(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->avx2 = _mm256_add_epi##bits(vec1d->avx2, vec2d->avx2); \
		return vec1d->vec; \
	} \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_sub(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->avx2 = _mm256_sub_epi##bits(vec1d->avx2, vec2d->avx2); \
		return vec1d->vec; \
	} \
	\
	VEC_AVX2_MUL_##bits##x##size(sign) \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_and(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->avx2 = _mm256_and_si256(vec1d->avx2, vec2d->avx2); \
		return vec1d->vec; \
	} \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_or(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->avx2 = _mm256_or_si256(vec1d->avx2, vec2d->avx2); \
		return vec1d->vec; \
	} \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avx2_xor(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->avx2 = _mm256_xor_si256(vec1d->avx2, vec2d->avx2); \
		return vec1d->vec; \
	} \
	\
	VEC_AVX2_AVG_##sign##bits##x##size \
	\
	VEC_AVX2_MINMAX_##sign##bits##x##size(min) \
	VEC_AVX2_MINMAX_##sign##bits##x##size(max) \
	\
	const v##sign##int##bits##x##size##_impl v##sign##int##bits##x##size##_impl_avx2 = { \
		.load_aligned = v##sign##int##bits##x##size##_avx2_load_aligned, \
		.load = v##sign##int##bits##x##size##_avx2_load, \
		.store_aligned = v##sign##int##bits##x##size##_avx2_store_aligned, \
		.store = v##sign##int##bits##x##size##_avx2_store, \
		.add = v##sign##int##bits##x##size##_avx2_add, \
		.sub = v##sign##int##bits##x##size##_avx2_sub, \
		.mul = VEC_AVX2_STRUCT_MUL_##bits##x##size(sign), \
		.band = v##sign##int##bits##x##size##_avx2_and, \
		.bor = v##sign##int##bits##x##size##_avx2_or, \
		.bxor = v##sign##int##bits##x##size##_avx2_xor, \
		.min = VEC_AVX2_STRUCT_MINMAX_##bits##x##size(min, sign), \
		.max = VEC_AVX2_STRUCT_MINMAX_##bits##x##size(max, sign), \
		.avg = VEC_AVX2_STRUCT_AVG_##sign##bits##x##size, \
	};

/* Instantiate both the signed and the unsigned backend for one lane
 * layout. */
#define VEC_AVX2_DEFINE_OPERATIONS(bits, size) \
	VEC_AVX2_DEFINE_OPERATIONS_SIGN( , bits, size) \
	VEC_AVX2_DEFINE_OPERATIONS_SIGN(u, bits, size)

/* Emit the implementations for every 256-bit integer lane layout. */
VEC_AVX2_DEFINE_OPERATIONS(8, 32)
VEC_AVX2_DEFINE_OPERATIONS(16, 16)
VEC_AVX2_DEFINE_OPERATIONS(32, 8)
VEC_AVX2_DEFINE_OPERATIONS(64, 4)