view src/impl/x86/sse2.c @ 28:c6c99ab1088a

*: add min/max functions and a big big refactor (again) agh, this time I added a few more implementations (and generally made the code just a little faster...)
author Paper <paper@tflc.us>
date Thu, 24 Apr 2025 00:54:02 -0400

/**
 * vec - a tiny SIMD vector library in C99
 * 
 * Copyright (c) 2024 Paper
 * 
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * 
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 * 
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
**/

#include "vec/impl/x86/sse2.h"

#include <emmintrin.h>

#define VEC_SSE2_DEFINE_IMPL_DATA(sign, bits, size) \
	union v##sign##int##bits##x##size##_impl_data { \
		v##sign##int##bits##x##size vec; \
		__m128i sse; \
	}; \
	\
	VEC_STATIC_ASSERT(VEC_ALIGNOF(__m128i) <= VEC_ALIGNOF(v##sign##int##bits##x##size), "vec: v" #sign "int" #bits "x" #size " alignment must be at least that of the intrinsic type"); \
	VEC_STATIC_ASSERT(sizeof(__m128i) <= sizeof(v##sign##int##bits##x##size), "vec: v" #sign "int" #bits "x" #size " must be at least as large as the intrinsic type");

VEC_SSE2_DEFINE_IMPL_DATA( , 8,  16)
VEC_SSE2_DEFINE_IMPL_DATA(u, 8,  16)
VEC_SSE2_DEFINE_IMPL_DATA( , 16, 8)
VEC_SSE2_DEFINE_IMPL_DATA(u, 16, 8)
VEC_SSE2_DEFINE_IMPL_DATA( , 32, 4)
VEC_SSE2_DEFINE_IMPL_DATA(u, 32, 4)
VEC_SSE2_DEFINE_IMPL_DATA( , 64, 2)
VEC_SSE2_DEFINE_IMPL_DATA(u, 64, 2)
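
/* Every function below reinterprets the generic vector struct as an __m128i
 * through one of these unions; the static assertions above guarantee that the
 * intrinsic value fits. A minimal sketch of the pattern (illustrative only,
 * using the vint32x4 type; not part of the implementation):
 *
 *     vint32x4 example_negate(vint32x4 v)
 *     {
 *         union vint32x4_impl_data *vd = (union vint32x4_impl_data *)&v;
 *         vd->sse = _mm_sub_epi32(_mm_setzero_si128(), vd->sse);
 *         return vd->vec;
 *     }
 */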

/* pick the right _mm_set1 intrinsic for each element width */
#define MM_SET1_8(x)  _mm_set1_epi8(x)
#define MM_SET1_16(x) _mm_set1_epi16(x)
#define MM_SET1_32(x) _mm_set1_epi32(x)
#define MM_SET1_64(x) _mm_set1_epi64x(x)

/* ------------------------------------------------------------------------ */

/* unfortunately doing this for SSE2 is PREPROCESSOR HELL: SSE2 only provides
 * min/max for unsigned 8-bit (_mm_min_epu8/_mm_max_epu8) and signed 16-bit
 * (_mm_min_epi16/_mm_max_epi16), so the opposite signedness has to be
 * emulated by flipping the sign bit before and after, and 32/64-bit lanes
 * get no min/max at all. */
#define VEC_SSE2_MINMAX_8x16_TEMPLATE(SIGN, OP, VALS, ADDITIONAL1, ADDITIONAL2) \
	VEC_FUNC_IMPL v##SIGN##int8x16 v##SIGN##int8x16_sse2_##OP(v##SIGN##int8x16 vec1, v##SIGN##int8x16 vec2) \
	{ \
		union v##SIGN##int8x16_impl_data *vec1d = (union v##SIGN##int8x16_impl_data *)&vec1; \
		union v##SIGN##int8x16_impl_data *vec2d = (union v##SIGN##int8x16_impl_data *)&vec2; \
		VALS \
	\
		ADDITIONAL1 \
	\
		vec1d->sse = _mm_##OP##_epu8(vec1d->sse, vec2d->sse); \
	\
		ADDITIONAL2 \
	\
		return vec1d->vec; \
	}

/* conveniently, this looks like K&R C ;) */
#define VEC_SSE2_MINMAX_8x16(OP) \
	VEC_SSE2_MINMAX_8x16_TEMPLATE(/* nothing */, OP, \
		__m128i xor_val = _mm_set1_epi8(0x80u); \
	, { \
		vec1d->sse = _mm_xor_si128(vec1d->sse, xor_val); \
		vec2d->sse = _mm_xor_si128(vec2d->sse, xor_val); \
	}, \
	{ \
		vec1d->sse = _mm_xor_si128(vec1d->sse, xor_val); \
	})

#define VEC_SSE2_MINMAX_u8x16(OP) \
	VEC_SSE2_MINMAX_8x16_TEMPLATE(u, OP, /* nothing */, /* nothing */, /* nothing */)
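
/* Why the XOR trick works: flipping the sign bit maps the signed range
 * [-128, 127] monotonically onto the unsigned range [0, 255], so the unsigned
 * min/max gives the right answer and flipping the bit back recovers the
 * signed result. A scalar sketch of the same idea (illustrative only,
 * assuming <stdint.h> types):
 *
 *     static int8_t scalar_min_i8(int8_t a, int8_t b)
 *     {
 *         uint8_t ba = (uint8_t)a ^ 0x80u; // bias into unsigned order
 *         uint8_t bb = (uint8_t)b ^ 0x80u;
 *         return (int8_t)((ba < bb ? ba : bb) ^ 0x80u); // un-bias the winner
 *     }
 *
 * The same bias, applied in the other direction, is what the unsigned
 * comparison macros further below use to run unsigned compares through the
 * signed _mm_cmpgt_* intrinsics. */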

#define VEC_SSE2_MINMAX_16x8_TEMPLATE(SIGN, OP, VALS, ADDITIONAL1, ADDITIONAL2) \
	VEC_FUNC_IMPL v##SIGN##int16x8 v##SIGN##int16x8_sse2_##OP(v##SIGN##int16x8 vec1, v##SIGN##int16x8 vec2) \
	{ \
		union v##SIGN##int16x8_impl_data *vec1d = (union v##SIGN##int16x8_impl_data *)&vec1; \
		union v##SIGN##int16x8_impl_data *vec2d = (union v##SIGN##int16x8_impl_data *)&vec2; \
		VALS \
	\
		ADDITIONAL1 \
	\
		vec1d->sse = _mm_##OP##_epi16(vec1d->sse, vec2d->sse); \
	\
		ADDITIONAL2 \
	\
		return vec1d->vec; \
	}

#define VEC_SSE2_MINMAX_16x8(OP) \
	VEC_SSE2_MINMAX_16x8_TEMPLATE(/* nothing */, OP, /* nothing */, /* nothing */, /* nothing */)

#define VEC_SSE2_MINMAX_u16x8(OP) \
	VEC_SSE2_MINMAX_16x8_TEMPLATE(u, OP, \
		__m128i xor_val = _mm_set1_epi16(0x8000u); \
	, { \
		vec1d->sse = _mm_xor_si128(vec1d->sse, xor_val); \
		vec2d->sse = _mm_xor_si128(vec2d->sse, xor_val); \
	}, \
	{ \
		vec1d->sse = _mm_xor_si128(vec1d->sse, xor_val); \
	})

#define VEC_SSE2_MINMAX_32x4(OP) /* none */
#define VEC_SSE2_MINMAX_64x2(OP) /* none */
#define VEC_SSE2_MINMAX_u32x4(OP) /* none */
#define VEC_SSE2_MINMAX_u64x2(OP) /* none */

#define VEC_SSE2_STRUCT_MINMAX_8x16(OP, SIGN) v##SIGN##int8x16_sse2_##OP
#define VEC_SSE2_STRUCT_MINMAX_16x8(OP, SIGN) v##SIGN##int16x8_sse2_##OP
#define VEC_SSE2_STRUCT_MINMAX_32x4(OP, SIGN) NULL
#define VEC_SSE2_STRUCT_MINMAX_64x2(OP, SIGN) NULL

/* ------------------------------------------------------------------------ */
/* multiplication */

#define VEC_SSE2_OPERATION_8x16(op, sign) \
	do { \
		/* operate on the even and odd bytes separately, as 16-bit lanes */ \
		union v##sign##int8x16_impl_data *vec1d = (union v##sign##int8x16_impl_data *)&vec1; \
		union v##sign##int8x16_impl_data *vec2d = (union v##sign##int8x16_impl_data *)&vec2; \
	\
		__m128i dst_even = _mm_##op##_epi16(vec1d->sse, vec2d->sse); \
		__m128i dst_odd = _mm_##op##_epi16(_mm_srli_epi16(vec1d->sse, 8), _mm_srli_epi16(vec2d->sse, 8)); \
	\
		/* repack */ \
		vec1d->sse = _mm_or_si128( \
			_mm_slli_epi16(dst_odd, 8), \
			_mm_srli_epi16(_mm_slli_epi16(dst_even, 8), 8) \
		); \
		return vec1d->vec; \
	} while (0)
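
/* SSE2 has no 8-bit multiply, so the bytes are handled as 16-bit lanes: the
 * even-indexed bytes already sit in the low half of each 16-bit lane, and the
 * odd-indexed bytes are shifted down into place first. Only the low 8 bits of
 * each 16-bit product survive the repacking, which is exactly the wrapping
 * 8-bit result (and is identical for signed and unsigned, since everything is
 * taken mod 2^8). A scalar sketch of one 16-bit lane (illustrative only):
 *
 *     static uint16_t mul_bytes_in_lane(uint16_t a, uint16_t b)
 *     {
 *         uint8_t even = (uint8_t)((a & 0xFF) * (b & 0xFF)); // low bytes
 *         uint8_t odd  = (uint8_t)((a >> 8) * (b >> 8));     // high bytes
 *         return (uint16_t)((odd << 8) | even);              // repack
 *     }
 */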

/* shared between SSE2 variations */
#define VEC_SSE2_MUL_8x16(sign) \
	VEC_SSE2_OPERATION_8x16(mullo, sign)

#define VEC_SSE2_MUL_16x8(sign) \
	do { \
		/* we have a real instruction for this */ \
		union v##sign##int16x8_impl_data *vec1d = (union v##sign##int16x8_impl_data *)&vec1; \
		union v##sign##int16x8_impl_data *vec2d = (union v##sign##int16x8_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_mullo_epi16(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} while (0)

#define VEC_SSE2_MUL_32x4(sign) \
	do { \
		/* this was stolen from... somewhere :) */ \
		union v##sign##int32x4_impl_data *vec1d = (union v##sign##int32x4_impl_data *)&vec1; \
		union v##sign##int32x4_impl_data *vec2d = (union v##sign##int32x4_impl_data *)&vec2; \
	\
		__m128i a13    = _mm_shuffle_epi32(vec1d->sse, 0xF5); /* (-,a3,-,a1) */ \
		__m128i b13    = _mm_shuffle_epi32(vec2d->sse, 0xF5); /* (-,b3,-,b1) */ \
		__m128i prod02 = _mm_mul_epu32(vec1d->sse, vec2d->sse); /* (-,a2*b2,-,a0*b0) */ \
		__m128i prod13 = _mm_mul_epu32(a13, b13);           /* (-,a3*b3,-,a1*b1) */ \
		__m128i prod01 = _mm_unpacklo_epi32(prod02,prod13); /* (-,-,a1*b1,a0*b0) */ \
		__m128i prod23 = _mm_unpackhi_epi32(prod02,prod13); /* (-,-,a3*b3,a2*b2) */ \
	\
		vec1d->sse = _mm_unpacklo_epi64(prod01, prod23); /* (ab3,ab2,ab1,ab0) */ \
		return vec1d->vec; \
	} while (0)
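
/* What the shuffles are for: _mm_mul_epu32 only multiplies lanes 0 and 2
 * (producing full 64-bit products), so lanes 1 and 3 are shuffled into those
 * positions for a second multiply, and the unpacks gather the low 32 bits of
 * all four products back into order. Each result lane is simply the product
 * mod 2^32, which is the same for signed and unsigned operands. A scalar
 * sketch of one lane (illustrative only):
 *
 *     static uint32_t mul_lane_u32(uint32_t a, uint32_t b)
 *     {
 *         return (uint32_t)((uint64_t)a * b); // keep the low 32 bits
 *     }
 */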

#define VEC_SSE2_MUL_64x2(sign) \
	do { \
		union v##sign##int64x2_impl_data *vec1d = (union v##sign##int64x2_impl_data *)&vec1; \
		union v##sign##int64x2_impl_data *vec2d = (union v##sign##int64x2_impl_data *)&vec2; \
	\
		__m128i ac = _mm_mul_epu32(vec1d->sse, vec2d->sse); /* ac = (vec1 & UINT32_MAX) * (vec2 & UINT32_MAX); */ \
		__m128i b  = _mm_srli_epi64(vec1d->sse, 32);      /* b = vec1 >> 32; */ \
		__m128i bc = _mm_mul_epu32(b, vec2d->sse);        /* bc = b * (vec2 & UINT32_MAX); */ \
		__m128i d  = _mm_srli_epi64(vec2d->sse, 32);      /* d = vec2 >> 32; */ \
		__m128i ad = _mm_mul_epu32(vec1d->sse, d);        /* ad = (vec1 & UINT32_MAX) * d; */ \
		__m128i hi = _mm_add_epi64(bc, ad);             /* hi = bc + ad; */ \
		hi = _mm_slli_epi64(hi, 32);                    /* hi <<= 32; */ \
	\
		vec1d->sse = _mm_add_epi64(hi, ac); /* (ab1,ab0) */ \
		return vec1d->vec; \
	} while (0)
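
/* Derivation for the 64-bit case: writing vec1 = a_hi*2^32 + a_lo and
 * vec2 = b_hi*2^32 + b_lo,
 *
 *     vec1 * vec2 = a_lo*b_lo                        (ac)
 *                 + (a_hi*b_lo + a_lo*b_hi) * 2^32   (hi)
 *                 + a_hi*b_hi * 2^64                 (wraps away mod 2^64)
 *
 * so only the three 32x32->64 multiplies above are needed to produce the low
 * 64 bits of each lane. */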

/* ------------------------------------------------------------------------ */
/* comparison */

/* helper funcs */
#define VEC_xSSE2_CMP(name, op, sign, bits, size, first, second, VARS, TRANS1, TRANS2) \
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_##name(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
		VARS \
	\
		TRANS1 \
	\
		vec1d->sse = _mm_##op##_epi##bits(vec##first##d->sse, vec##second##d->sse); \
	\
		TRANS2 \
	\
		return vec1d->vec; \
	}

#define VEC_SSE2_CMP(name, op, bits, size, first, second) \
	VEC_xSSE2_CMP(name, op, /* nothing */, bits, size, first, second, /* nothing */, /* nothing */, /* nothing */)

#define VEC_uSSE2_CMP(name, op, bits, size, first, second) \
	VEC_xSSE2_CMP(name, op, u, bits, size, first, second, \
		__m128i xor_val = MM_SET1_##bits(1u << (bits - 1)); \
	, { \
		vec1d->sse = _mm_xor_si128(vec1d->sse, xor_val); \
		vec2d->sse = _mm_xor_si128(vec2d->sse, xor_val); \
	}, \
	{ \
		/* nothing */ \
	})

/* these are the same for unsigned and signed, since equality is just a bitwise comparison. */
#define VEC_SSE2_CMPEQ_8x16(sign) VEC_xSSE2_CMP(cmpeq, cmpeq, sign, 8, 16, 1, 2, , ,)
#define VEC_SSE2_CMPEQ_16x8(sign) VEC_xSSE2_CMP(cmpeq, cmpeq, sign, 16, 8, 1, 2, , ,)
#define VEC_SSE2_CMPEQ_32x4(sign) VEC_xSSE2_CMP(cmpeq, cmpeq, sign, 32, 4, 1, 2, , ,)

/* SSE2 doesn't have an intrinsic for 64x2 equality comparison,
 * so how can we take a 32x4 comparison result and turn it into
 * a 64x2 comparison result?
 *
 * well, Intel conveniently provided an operation where we can
 * shuffle around 32-bit integers (_mm_shuffle_epi32).
 *
 * this means all we have to do is simply do the 32-bit operation,
 * shuffle the parts, and then return a bitwise AND of the result. */

#define VEC_SSE2_CMPEQ_64x2(sign) \
	VEC_FUNC_IMPL v##sign##int64x2 v##sign##int64x2_sse2_cmpeq(v##sign##int64x2 vec1, v##sign##int64x2 vec2) \
	{ \
		union v##sign##int64x2_impl_data *vec1d = (union v##sign##int64x2_impl_data *)&vec1; \
		union v##sign##int64x2_impl_data *vec2d = (union v##sign##int64x2_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_cmpeq_epi32(vec1d->sse, vec2d->sse); \
		vec2d->sse = _mm_shuffle_epi32(vec1d->sse, _MM_SHUFFLE(3, 3, 1, 1)); \
		vec1d->sse = _mm_shuffle_epi32(vec1d->sse, _MM_SHUFFLE(2, 2, 0, 0)); \
		vec1d->sse = _mm_and_si128(vec1d->sse, vec2d->sse); \
	\
		return vec1d->vec; \
	}
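
/* After _mm_cmpeq_epi32 each 32-bit lane is either all-ones or all-zeros; the
 * two shuffles spread the low-half and high-half masks of each 64-bit element
 * across that whole element, so the AND is all-ones exactly when both halves
 * matched. The scalar equivalent per element (illustrative only):
 *
 *     static uint64_t eq_mask_u64(uint64_t x, uint64_t y)
 *     {
 *         int lo = ((uint32_t)x == (uint32_t)y);
 *         int hi = ((x >> 32) == (y >> 32));
 *         return (lo && hi) ? ~(uint64_t)0 : 0;
 *     }
 */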

/* ------------------------------------------------------------------------ */

#define VEC_SSE2_CMPLT_8x16(sign) VEC_##sign##SSE2_CMP(cmplt, cmpgt, 8, 16, 2, 1)
#define VEC_SSE2_CMPLT_16x8(sign) VEC_##sign##SSE2_CMP(cmplt, cmpgt, 16, 8, 2, 1)
#define VEC_SSE2_CMPLT_32x4(sign) VEC_##sign##SSE2_CMP(cmplt, cmpgt, 32, 4, 2, 1)
#define VEC_SSE2_CMPLT_64x2(sign) /* nothing */

#define VEC_SSE2_CMPGT_8x16(sign) VEC_##sign##SSE2_CMP(cmpgt, cmpgt, 8, 16, 1, 2)
#define VEC_SSE2_CMPGT_16x8(sign) VEC_##sign##SSE2_CMP(cmpgt, cmpgt, 16, 8, 1, 2)
#define VEC_SSE2_CMPGT_32x4(sign) VEC_##sign##SSE2_CMP(cmpgt, cmpgt, 32, 4, 1, 2)
#define VEC_SSE2_CMPGT_64x2(sign) /* nothing */

#define VEC_SSE2_STRUCT_CMP_8x16(name, sign) v##sign##int8x16_sse2_##name
#define VEC_SSE2_STRUCT_CMP_16x8(name, sign) v##sign##int16x8_sse2_##name
#define VEC_SSE2_STRUCT_CMP_32x4(name, sign) v##sign##int32x4_sse2_##name
#define VEC_SSE2_STRUCT_CMP_64x2(name, sign) NULL

/* ------------------------------------------------------------------------ */

#define VEC_SSE2_DEFINE_OPERATIONS_SIGN(sign, bits, size) \
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_splat(vec_##sign##int##bits x) \
	{ \
		union v##sign##int##bits##x##size##_impl_data vec; \
		vec.sse = MM_SET1_##bits(x); \
		return vec.vec; \
	} \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_load_aligned(const vec_##sign##int##bits in[size]) \
	{ \
		union v##sign##int##bits##x##size##_impl_data vec; \
		vec.sse = _mm_load_si128((const __m128i *)in); \
		return vec.vec; \
	} \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_load(const vec_##sign##int##bits in[size]) \
	{ \
		union v##sign##int##bits##x##size##_impl_data vec; \
		vec.sse = _mm_loadu_si128((const __m128i *)in); \
		return vec.vec; \
	} \
	\
	VEC_FUNC_IMPL void v##sign##int##bits##x##size##_sse2_store_aligned(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
	{ \
		_mm_store_si128((__m128i *)out, ((union v##sign##int##bits##x##size##_impl_data *)&vec)->sse); \
	} \
	\
	VEC_FUNC_IMPL void v##sign##int##bits##x##size##_sse2_store(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
	{ \
		_mm_storeu_si128((__m128i *)out, ((union v##sign##int##bits##x##size##_impl_data *)&vec)->sse); \
	} \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_add(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_add_epi##bits(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_sub(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_sub_epi##bits(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_mul(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		VEC_SSE2_MUL_##bits##x##size(sign); \
	} \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_and(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_and_si128(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_or(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_or_si128(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_xor(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->sse = _mm_xor_si128(vec1d->sse, vec2d->sse); \
		return vec1d->vec; \
	} \
	\
	VEC_SSE2_CMPEQ_##bits##x##size(sign) \
	VEC_SSE2_CMPLT_##bits##x##size(sign) \
	VEC_SSE2_CMPGT_##bits##x##size(sign) \
	\
	VEC_SSE2_MINMAX_##sign##bits##x##size(min) \
	VEC_SSE2_MINMAX_##sign##bits##x##size(max) \
	\
	const v##sign##int##bits##x##size##_impl v##sign##int##bits##x##size##_impl_sse2 = { \
		.splat = v##sign##int##bits##x##size##_sse2_splat, \
		.load_aligned = v##sign##int##bits##x##size##_sse2_load_aligned, \
		.load = v##sign##int##bits##x##size##_sse2_load, \
		.store_aligned = v##sign##int##bits##x##size##_sse2_store_aligned, \
		.store = v##sign##int##bits##x##size##_sse2_store, \
		.add = v##sign##int##bits##x##size##_sse2_add, \
		.sub = v##sign##int##bits##x##size##_sse2_sub, \
		.mul = v##sign##int##bits##x##size##_sse2_mul, \
		.band = v##sign##int##bits##x##size##_sse2_and, \
		.bor = v##sign##int##bits##x##size##_sse2_or, \
		.bxor = v##sign##int##bits##x##size##_sse2_xor, \
		.cmpeq = v##sign##int##bits##x##size##_sse2_cmpeq, \
		.cmplt = VEC_SSE2_STRUCT_CMP_##bits##x##size(cmplt, sign), \
		.cmpgt = VEC_SSE2_STRUCT_CMP_##bits##x##size(cmpgt, sign), \
		.min = VEC_SSE2_STRUCT_MINMAX_##bits##x##size(min, sign), \
		.max = VEC_SSE2_STRUCT_MINMAX_##bits##x##size(max, sign), \
	};

#define VEC_SSE2_DEFINE_OPERATIONS(bits, size) \
	VEC_SSE2_DEFINE_OPERATIONS_SIGN(u, bits, size) \
	VEC_SSE2_DEFINE_OPERATIONS_SIGN( , bits, size)

/* SSE is *only* 128-bit */
VEC_SSE2_DEFINE_OPERATIONS(8, 16)
VEC_SSE2_DEFINE_OPERATIONS(16, 8)
VEC_SSE2_DEFINE_OPERATIONS(32, 4)
VEC_SSE2_DEFINE_OPERATIONS(64, 2)
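
/* A minimal usage sketch of one generated implementation struct (illustrative
 * only; real code would normally go through the library's public front-end,
 * presumably vec.h, rather than calling an impl struct directly):
 *
 *     vec_int32 a[4] = {1, 2, 3, 4}, b[4] = {5, 6, 7, 8}, out[4];
 *     vint32x4 va = vint32x4_impl_sse2.load(a);
 *     vint32x4 vb = vint32x4_impl_sse2.load(b);
 *     vint32x4_impl_sse2.store(vint32x4_impl_sse2.add(va, vb), out);
 *     // out now holds {6, 8, 10, 12}
 */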