diff src/impl/x86/sse41.c @ 28:c6c99ab1088a

*: add min/max functions and a big refactor (again); this time I added a few more implementations and generally made the code a little faster.
author Paper <paper@tflc.us>
date Thu, 24 Apr 2025 00:54:02 -0400
parents e49e70f7012f
children bf6ad516f1e6
--- a/src/impl/x86/sse41.c	Mon Nov 25 00:33:02 2024 -0500
+++ b/src/impl/x86/sse41.c	Thu Apr 24 00:54:02 2025 -0400
@@ -23,54 +23,138 @@
 **/
 
 #include "vec/impl/x86/sse41.h"
-#include "vec/impl/x86/sse2.h"
 #include "vec/impl/generic.h"
 
 #include <immintrin.h>
 
-// SSE 4.1 provides a real _mm_mullo_epi32
-#define VEC_SSE41_DEFINE_OPERATIONS(sign) \
-	union v##sign##int32x4_impl_data { \
-		v##sign##int32x4 vec; \
-		__m128i sse; \
-	}; \
+/* ------------------------------------------------------------------------ */
+
+#define VEC_SSE41_MINMAX_TEMPLATE(SIGN, BITS, SIZE, INTLSIGN, OP) \
+	VEC_FUNC_IMPL v##SIGN##int##BITS##x##SIZE v##SIGN##int##BITS##x##SIZE##_sse41_##OP(v##SIGN##int##BITS##x##SIZE vec1, v##SIGN##int##BITS##x##SIZE vec2) \
+	{ \
+		union v##SIGN##int##BITS##x##SIZE##_impl_data *vec1d = (union v##SIGN##int##BITS##x##SIZE##_impl_data *)&vec1; \
+		union v##SIGN##int##BITS##x##SIZE##_impl_data *vec2d = (union v##SIGN##int##BITS##x##SIZE##_impl_data *)&vec2; \
+	\
+		vec1d->sse = _mm_##OP##_ep##INTLSIGN##BITS(vec1d->sse, vec2d->sse); \
 	\
-	VEC_STATIC_ASSERT(VEC_ALIGNOF(__m128i) <= VEC_ALIGNOF(v##sign##int32x4), "vec: v" #sign "int32x4 alignment needs to be expanded to fit intrinsic type size"); \
-	VEC_STATIC_ASSERT(sizeof(__m128i) <= sizeof(v##sign##int32x4), "vec: v" #sign "int32x4 needs to be expanded to fit intrinsic type size"); \
-	\
-	static v##sign##int32x4 v##sign##int32x4_sse41_mul(v##sign##int32x4 vec1, v##sign##int32x4 vec2) \
+		return vec1d->vec; \
+	}
+
+#define VEC_SSE41_MINMAX_8x16(OP)  VEC_SSE41_MINMAX_TEMPLATE( , 8, 16, i, OP)
+#define VEC_SSE41_MINMAX_u8x16(OP) VEC_SSE41_MINMAX_TEMPLATE(u, 8, 16, u, OP)
+#define VEC_SSE41_MINMAX_16x8(OP)  VEC_SSE41_MINMAX_TEMPLATE( , 16, 8, i, OP)
+#define VEC_SSE41_MINMAX_u16x8(OP) VEC_SSE41_MINMAX_TEMPLATE(u, 16, 8, u, OP)
+#define VEC_SSE41_MINMAX_32x4(OP)  VEC_SSE41_MINMAX_TEMPLATE( , 32, 4, i, OP)
+#define VEC_SSE41_MINMAX_u32x4(OP) VEC_SSE41_MINMAX_TEMPLATE(u, 32, 4, u, OP)
+#define VEC_SSE41_MINMAX_64x2(OP)  /* nothing */
+#define VEC_SSE41_MINMAX_u64x2(OP) /* nothing */
+
+#define VEC_SSE41_STRUCT_MINMAX_8x16(OP, SIGN) v##SIGN##int8x16_sse41_##OP
+#define VEC_SSE41_STRUCT_MINMAX_16x8(OP, SIGN) v##SIGN##int16x8_sse41_##OP
+#define VEC_SSE41_STRUCT_MINMAX_32x4(OP, SIGN) v##SIGN##int32x4_sse41_##OP
+#define VEC_SSE41_STRUCT_MINMAX_64x2(OP, SIGN) NULL
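
For reference, here is roughly what one instantiation of the template above expands to (VEC_SSE41_MINMAX_8x16(min), i.e. VEC_SSE41_MINMAX_TEMPLATE( , 8, 16, i, min)); _mm_min_epi8 is one of the min/max intrinsics new in SSE4.1, since SSE2 only covers the epu8 and epi16 variants:

/* hand-expanded sketch; the real code is generated by the macro above */
VEC_FUNC_IMPL vint8x16 vint8x16_sse41_min(vint8x16 vec1, vint8x16 vec2)
{
	union vint8x16_impl_data *vec1d = (union vint8x16_impl_data *)&vec1;
	union vint8x16_impl_data *vec2d = (union vint8x16_impl_data *)&vec2;

	vec1d->sse = _mm_min_epi8(vec1d->sse, vec2d->sse);
	return vec1d->vec;
}
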
+
+/* ------------------------------------------------------------------------ */
+/* multiplication */
+
+#define VEC_SSE41_MUL_8x16(sign) /* nothing */
+#define VEC_SSE41_MUL_16x8(sign) /* nothing */
+#define VEC_SSE41_MUL_32x4(sign) \
+	VEC_FUNC_IMPL v##sign##int32x4 v##sign##int32x4_sse41_mul(v##sign##int32x4 vec1, v##sign##int32x4 vec2) \
 	{ \
 		union v##sign##int32x4_impl_data *vec1d = (union v##sign##int32x4_impl_data *)&vec1; \
 		union v##sign##int32x4_impl_data *vec2d = (union v##sign##int32x4_impl_data *)&vec2; \
 	\
 		vec1d->sse = _mm_mullo_epi32(vec1d->sse, vec2d->sse); \
 		return vec1d->vec; \
-	} \
+	}
+#define VEC_SSE41_MUL_64x2(sign) /* nothing */
+
+#define VEC_SSE41_STRUCT_MUL_8x16(SIGN) NULL
+#define VEC_SSE41_STRUCT_MUL_16x8(SIGN) NULL
+#define VEC_SSE41_STRUCT_MUL_32x4(SIGN) v##SIGN##int32x4_sse41_mul
+#define VEC_SSE41_STRUCT_MUL_64x2(SIGN) NULL
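
For contrast, a sketch of the dance plain SSE2 has to do for a 32-bit lanewise multiply (the function name here is illustrative, not from vec): _mm_mul_epu32 only multiplies lanes 0 and 2, producing 64-bit results, so the odd lanes need their own shift-and-shuffle pass. SSE4.1's _mm_mullo_epi32 replaces all of this with a single instruction:

/* classic SSE2 fallback for 32x32 -> low 32-bit multiply */
static __m128i mullo_epi32_sse2(__m128i a, __m128i b)
{
	__m128i even = _mm_mul_epu32(a, b);                 /* lanes 0, 2 */
	__m128i odd  = _mm_mul_epu32(_mm_srli_si128(a, 4),
	                             _mm_srli_si128(b, 4)); /* lanes 1, 3 */

	/* compact the low dwords of each 64-bit product, then interleave */
	return _mm_unpacklo_epi32(
		_mm_shuffle_epi32(even, _MM_SHUFFLE(0, 0, 2, 0)),
		_mm_shuffle_epi32(odd,  _MM_SHUFFLE(0, 0, 2, 0)));
}
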
+
+/* ------------------------------------------------------------------------ */
+/* comparison */
+
+#define MM_SET1_64(x) _mm_set1_epi64x(x) /* shim: the 64-bit set1 intrinsic breaks the _epi##bits naming pattern */
+
+/* helper funcs */
+#define VEC_xSSE41_CMP(name, op, sign, bits, size, first, second, VARS, TRANS1, TRANS2) \
+	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse41_##name(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
+	{ \
+		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
+		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
+		VARS \
+	\
+		TRANS1 \
+	\
+		vec1d->sse = _mm_##op##_epi##bits(vec##first##d->sse, vec##second##d->sse); \
+	\
+		TRANS2 \
 	\
-	const v##sign##int32x4_impl v##sign##int32x4_impl_sse41 = { \
-		v##sign##int32x4_generic_splat, \
-		v##sign##int32x4_sse2_load_aligned, \
-		v##sign##int32x4_sse2_load, \
-		v##sign##int32x4_sse2_store_aligned, \
-		v##sign##int32x4_sse2_store, \
-		v##sign##int32x4_sse2_add, \
-		v##sign##int32x4_sse2_sub, \
-		v##sign##int32x4_sse41_mul, \
-		v##sign##int32x4_generic_div, \
-		v##sign##int32x4_generic_avg, \
-		v##sign##int32x4_sse2_and, \
-		v##sign##int32x4_sse2_or, \
-		v##sign##int32x4_sse2_xor, \
-		v##sign##int32x4_generic_not, \
-		v##sign##int32x4_generic_lshift, \
-		v##sign##int32x4_generic_rshift, \
-		v##sign##int32x4_generic_lrshift, \
-		v##sign##int32x4_generic_cmplt, \
-		v##sign##int32x4_generic_cmple, \
-		v##sign##int32x4_sse2_cmpeq, \
-		v##sign##int32x4_generic_cmpge, \
-		v##sign##int32x4_generic_cmpgt, \
+		return vec1d->vec; \
+	}
+
+#define VEC_SSE41_CMP(name, op, bits, size, first, second) \
+	VEC_xSSE41_CMP(name, op, /* nothing */, bits, size, first, second, /* nothing */, /* nothing */, /* nothing */)
+
+#define VEC_uSSE41_CMP(name, op, bits, size, first, second) \
+	VEC_xSSE41_CMP(name, op, u, bits, size, first, second, \
+		__m128i xor_val = MM_SET1_##bits(UINT64_C(1) << (bits - 1)); \
+	, { \
+		vec1d->sse = _mm_xor_si128(vec1d->sse, xor_val); \
+		vec2d->sse = _mm_xor_si128(vec2d->sse, xor_val); \
+	}, \
+	{ \
+		/* nothing */ \
+	})
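
VEC_uSSE41_CMP builds on the usual sign-flip trick: XORing both operands with the sign bit (1 << (bits - 1)) maps unsigned order onto signed order, so the signed compare intrinsic yields the unsigned answer. A standalone sketch for 32-bit lanes (illustrative, not part of vec):

/* unsigned greater-than via a signed compare after flipping the sign bit */
static __m128i cmpgt_epu32(__m128i a, __m128i b)
{
	const __m128i bias = _mm_set1_epi32((int)0x80000000);
	return _mm_cmpgt_epi32(_mm_xor_si128(a, bias),
	                       _mm_xor_si128(b, bias));
}
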
+
+/* these are the same for unsigned and signed, for obvious reasons. */
+#define VEC_SSE41_CMPEQ_8x16(sign) /* nothing */
+#define VEC_SSE41_CMPEQ_16x8(sign) /* nothing */
+#define VEC_SSE41_CMPEQ_32x4(sign) /* nothing */
+#define VEC_SSE41_CMPEQ_64x2(sign) VEC_xSSE41_CMP(cmpeq, cmpeq, sign, 64, 2, 1, 2, , ,)
+
+/* ------------------------------------------------------------------------ */
+
+#define VEC_SSE41_STRUCT_CMP_8x16(name, sign) NULL
+#define VEC_SSE41_STRUCT_CMP_16x8(name, sign) NULL
+#define VEC_SSE41_STRUCT_CMP_32x4(name, sign) NULL
+#define VEC_SSE41_STRUCT_CMP_64x2(name, sign) v##sign##int64x2_sse41_##name
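
A NULL entry here means there is no SSE4.1-specific version for that element width, and presumably the dispatcher falls back to an earlier backend. A hypothetical sketch of what consuming the sparse table could look like (vint64x2_generic_cmpeq is an assumed fallback name, following the generic_* pattern in the old table below):

vint64x2 vint64x2_cmpeq(vint64x2 a, vint64x2 b)
{
	if (vint64x2_impl_sse41.cmpeq)
		return vint64x2_impl_sse41.cmpeq(a, b);
	return vint64x2_generic_cmpeq(a, b); /* assumed generic fallback */
}
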
+
+/* ------------------------------------------------------------------------ */
+
+/* SSE 4.1 provides a real _mm_mullo_epi32, 8/16/32-bit min/max, and a 64-bit cmpeq */
+#define VEC_SSE41_DEFINE_OPERATIONS_SIGN(sign, bits, size) \
+	union v##sign##int##bits##x##size##_impl_data { \
+		v##sign##int##bits##x##size vec; \
+		__m128i sse; \
+	}; \
+	\
+	VEC_STATIC_ASSERT(VEC_ALIGNOF(__m128i) <= VEC_ALIGNOF(v##sign##int##bits##x##size), "vec: v" #sign "int" #bits "x" #size " alignment needs to be expanded to fit intrinsic type size"); \
+	VEC_STATIC_ASSERT(sizeof(__m128i) <= sizeof(v##sign##int##bits##x##size), "vec: v" #sign "int" #bits "x" #size " needs to be expanded to fit intrinsic type size"); \
+	\
+	VEC_SSE41_MUL_##bits##x##size(sign) \
+	\
+	VEC_SSE41_MINMAX_##sign##bits##x##size(min) \
+	VEC_SSE41_MINMAX_##sign##bits##x##size(max) \
+	\
+	VEC_SSE41_CMPEQ_##bits##x##size(sign) \
+	\
+	const v##sign##int##bits##x##size##_impl v##sign##int##bits##x##size##_impl_sse41 = { \
+		.mul = VEC_SSE41_STRUCT_MUL_##bits##x##size(sign), \
+		.min = VEC_SSE41_STRUCT_MINMAX_##bits##x##size(min, sign), \
+		.max = VEC_SSE41_STRUCT_MINMAX_##bits##x##size(max, sign), \
+		.cmpeq = VEC_SSE41_STRUCT_CMP_##bits##x##size(cmpeq, sign), \
 	};
 
-VEC_SSE41_DEFINE_OPERATIONS()
-VEC_SSE41_DEFINE_OPERATIONS(u)
+#define VEC_SSE41_DEFINE_OPERATIONS(bits, size) \
+	VEC_SSE41_DEFINE_OPERATIONS_SIGN(u, bits, size) \
+	VEC_SSE41_DEFINE_OPERATIONS_SIGN( , bits, size)
+
+VEC_SSE41_DEFINE_OPERATIONS(8, 16)
+VEC_SSE41_DEFINE_OPERATIONS(16, 8)
+VEC_SSE41_DEFINE_OPERATIONS(32, 4)
+VEC_SSE41_DEFINE_OPERATIONS(64, 2)
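
Putting it together, this is roughly what VEC_SSE41_DEFINE_OPERATIONS_SIGN( , 64, 2) leaves behind once the helper macros resolve; with designated initializers, every unnamed member stays zero (NULL), so the table only claims the one operation SSE4.1 actually adds for 64-bit lanes:

/* hand-expanded sketch of the generated 64x2 table */
const vint64x2_impl vint64x2_impl_sse41 = {
	.mul   = NULL,                   /* VEC_SSE41_STRUCT_MUL_64x2    */
	.min   = NULL,                   /* VEC_SSE41_STRUCT_MINMAX_64x2 */
	.max   = NULL,
	.cmpeq = vint64x2_sse41_cmpeq,   /* _mm_cmpeq_epi64, new in SSE4.1 */
};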