/**
 * vec - a tiny SIMD vector library in C99
 * 
 * Copyright (c) 2024 Paper
 * 
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * 
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 * 
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
**/

#ifndef VEC_IMPL_X86_SSE2_H_
#define VEC_IMPL_X86_SSE2_H_

/* SSE2 has no 8-bit multiply and no 8-bit shifts, so emulate them with the
 * corresponding 16-bit operation: run it once on the full 16-bit lanes (whose
 * low bytes hold the even elements) and once on the lanes shifted right by 8
 * (the odd elements), then merge the two results back together. */
#define VEC_SSE2_OPERATION_8x16(op, sign) \
	do { \
		/* perform the 16-bit operation on the even and odd bytes separately */ \
		__m128i dst_even = _mm_##op##_epi16(vec1.sse, vec2.sse); \
		__m128i dst_odd = _mm_##op##_epi16(_mm_srli_epi16(vec1.sse, 8), _mm_srli_epi16(vec2.sse, 8)); \
	\
		/* repack: odd results go back to the high bytes, even results are masked down to their low bytes */ \
		return (v##sign##int8x16){ .sse = _mm_or_si128( \
			_mm_slli_epi16(dst_odd, 8), \
			_mm_srli_epi16(_mm_slli_epi16(dst_even, 8), 8) \
		)}; \
	} while (0)
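/* For reference, VEC_SSE2_MUL_8x16(u) below routes through this macro and
 * expands to roughly the following (wrapped in a do { ... } while (0) block):
 *
 *     __m128i dst_even = _mm_mullo_epi16(vec1.sse, vec2.sse);
 *     __m128i dst_odd  = _mm_mullo_epi16(_mm_srli_epi16(vec1.sse, 8),
 *                                        _mm_srli_epi16(vec2.sse, 8));
 *     return (vuint8x16){ .sse = _mm_or_si128(
 *         _mm_slli_epi16(dst_odd, 8),
 *         _mm_srli_epi16(_mm_slli_epi16(dst_even, 8), 8)) };
 */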

// shifting
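// note: _mm_sll_epi16/_mm_srl_epi32/_mm_sra_epi16 and friends take the shift
// amount from the low 64 bits of their second operand, so every lane of vec1
// is shifted by that single count (vec2 is not a per-lane shift amount here)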
#define VEC_SSE2_LSHIFT_8x16(sign) \
	VEC_SSE2_OPERATION_8x16(sll, sign)

#define VEC_SSE2_LSHIFT_16x8(sign) \
	do { \
		return (v##sign##int16x8){ .sse = _mm_sll_epi16(vec1.sse, vec2.sse) }; \
	} while (0)

#define VEC_SSE2_LSHIFT_32x4(sign) \
	do { \
		return (v##sign##int32x4){ .sse = _mm_sll_epi32(vec1.sse, vec2.sse) }; \
	} while (0)

#define VEC_SSE2_LSHIFT_64x2(sign) \
	do { \
		return (v##sign##int64x2){ .sse = _mm_sll_epi64(vec1.sse, vec2.sse) }; \
	} while (0)

#define VEC_SSE2_RSHIFT_8x16(sign, aORl) \
	VEC_SSE2_OPERATION_8x16(sr##aORl, sign)

#define VEC_SSE2_RSHIFT_16x8(sign, aORl) \
	do { \
		return (v##sign##int16x8){ .sse = _mm_sr##aORl##_epi16(vec1.sse, vec2.sse) }; \
	} while (0)

#define VEC_SSE2_RSHIFT_32x4(sign, aORl) \
	do { \
		return (v##sign##int32x4){ .sse = _mm_sr##aORl##_epi32(vec1.sse, vec2.sse) }; \
	} while (0)

/* SSE2 has no 64-bit arithmetic right shift, so defer to the generic fallback */
#define VEC_SSE2_aRSHIFT_64x2(sign) \
	do { \
		return v##sign##int64x2_fallback_rshift(vec1, vec2); \
	} while (0)

#define VEC_SSE2_lRSHIFT_64x2(sign) \
	do { \
		return (v##sign##int64x2){ .sse = _mm_srl_epi64(vec1.sse, vec2.sse) }; \
	} while (0)

#define VEC_SSE2_RSHIFT_64x2(sign, aORl) \
	VEC_SSE2_##aORl##RSHIFT_64x2(sign)

// shared between SSE2 variations
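// the 8-bit multiply can reuse the even/odd 16-bit trick above because the low
// 8 bits of a 16-bit product depend only on the low 8 bits of its operands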
#define VEC_SSE2_MUL_8x16(sign) \
	VEC_SSE2_OPERATION_8x16(mullo, sign)

#define VEC_SSE2_MUL_16x8(sign) \
	do { \
		/* we have a real instruction for this */ \
		return (v##sign##int16x8){ .sse = _mm_mullo_epi16(vec1.sse, vec2.sse) }; \
	} while (0)

#define VEC_SSE2_MUL_32x4(sign) \
	do { \
		/* SSE2 only has a widening even-lane multiply (_mm_mul_epu32), so multiply \
		   the even and odd 32-bit lanes separately and interleave the low halves \
		   of the results; this was stolen from... somewhere :) */ \
		__m128i a13    = _mm_shuffle_epi32(vec1.sse, 0xF5); /* (-,a3,-,a1) */ \
		__m128i b13    = _mm_shuffle_epi32(vec2.sse, 0xF5); /* (-,b3,-,b1) */ \
		__m128i prod02 = _mm_mul_epu32(vec1.sse, vec2.sse); /* (-,a2*b2,-,a0*b0) */ \
		__m128i prod13 = _mm_mul_epu32(a13, b13);           /* (-,a3*b3,-,a1*b1) */ \
		__m128i prod01 = _mm_unpacklo_epi32(prod02,prod13); /* (-,-,a1*b1,a0*b0) */ \
		__m128i prod23 = _mm_unpackhi_epi32(prod02,prod13); /* (-,-,a3*b3,a2*b2) */ \
		return (v##sign##int32x4) { .sse = _mm_unpacklo_epi64(prod01, prod23) }; /* (ab3,ab2,ab1,ab0) */ \
	} while (0)

/* 64-bit multiply built from 32-bit widening multiplies:
 * result = lo1*lo2 + ((hi1*lo2 + lo1*hi2) << 32); the hi1*hi2 term would land
 * above bit 63 and is dropped, which is correct modulo 2^64 */
#define VEC_SSE2_MUL_64x2(sign) \
	do { \
		__m128i ac = _mm_mul_epu32(vec1.sse, vec2.sse); /* ac = (vec1 & UINT32_MAX) * (vec2 & UINT32_MAX); */ \
		__m128i b  = _mm_srli_epi64(vec1.sse, 32);      /* b = vec1 >> 32; */ \
		__m128i bc = _mm_mul_epu32(b, vec2.sse);        /* bc = b * (vec2 & UINT32_MAX); */ \
		__m128i d  = _mm_srli_epi64(vec2.sse, 32);      /* d = vec2 >> 32; */ \
		__m128i ad = _mm_mul_epu32(vec1.sse, d);        /* ad = (vec1 & UINT32_MAX) * d; */ \
		__m128i hi = _mm_add_epi64(bc, ad);             /* hi = bc + ad; */ \
		hi = _mm_slli_epi64(hi, 32);                    /* hi <<= 32; */ \
		return (v##sign##int64x2) { .sse = _mm_add_epi64(hi, ac) }; /* return ac + hi; */ \
	} while (0)

/* define the SSE2-backed load/store, arithmetic, bitwise, and shift wrappers
 * for one signedness of one element layout, plus the operation table that
 * exposes them; the _aligned variants require 16-byte aligned pointers, as
 * demanded by _mm_load_si128/_mm_store_si128 */
#define VEC_SSE2_DEFINE_OPERATIONS_SIGN(sign, bits, size) \
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_load_aligned(const sign##int##bits##_t in[size]) \
	{ \
		return (v##sign##int##bits##x##size) { .sse = _mm_load_si128((const __m128i *)in) }; \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_load(const sign##int##bits##_t in[size]) \
	{ \
		return (v##sign##int##bits##x##size) { .sse = _mm_loadu_si128((const __m128i *)in) }; \
	} \
	\
	static void v##sign##int##bits##x##size##_sse2_store_aligned(v##sign##int##bits##x##size vec, sign##int##bits##_t out[size]) \
	{ \
		_mm_store_si128((__m128i *)out, vec.sse); \
	} \
	\
	static void v##sign##int##bits##x##size##_sse2_store(v##sign##int##bits##x##size vec, sign##int##bits##_t out[size]) \
	{ \
		_mm_storeu_si128((__m128i *)out, vec.sse); \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_add(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		return (v##sign##int##bits##x##size) { .sse = _mm_add_epi##bits(vec1.sse, vec2.sse) }; \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_sub(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		return (v##sign##int##bits##x##size) { .sse = _mm_sub_epi##bits(vec1.sse, vec2.sse) }; \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_mul(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		VEC_SSE2_MUL_##bits##x##size(sign); \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_and(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		return (v##sign##int##bits##x##size) { .sse = _mm_and_si128(vec1.sse, vec2.sse) }; \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_or(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		return (v##sign##int##bits##x##size) { .sse = _mm_or_si128(vec1.sse, vec2.sse) }; \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_xor(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		return (v##sign##int##bits##x##size) { .sse = _mm_xor_si128(vec1.sse, vec2.sse) }; \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_lshift(v##sign##int##bits##x##size vec1, vuint##bits##x##size vec2) \
	{ \
		VEC_SSE2_LSHIFT_##bits##x##size(sign); \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_rshift(v##sign##int##bits##x##size vec1, vuint##bits##x##size vec2) \
	{ \
		VEC_SSE2_RSHIFT_##bits##x##size(sign, a); \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_sse2_lrshift(v##sign##int##bits##x##size vec1, vuint##bits##x##size vec2) \
	{ \
		VEC_SSE2_RSHIFT_##bits##x##size(sign, l); \
	} \
	\
	/* operation table for this element type, filled with the wrappers above; \
	   the library picks an implementation at runtime based on detected CPU features */ \
	static v##sign##int##bits##x##size##_impl v##sign##int##bits##x##size##_impl_sse2 = { \
		.load_aligned  = v##sign##int##bits##x##size##_sse2_load_aligned, \
		.load          = v##sign##int##bits##x##size##_sse2_load, \
		.store_aligned = v##sign##int##bits##x##size##_sse2_store_aligned, \
		.store         = v##sign##int##bits##x##size##_sse2_store, \
		.add           = v##sign##int##bits##x##size##_sse2_add, \
		.sub           = v##sign##int##bits##x##size##_sse2_sub, \
		.mul           = v##sign##int##bits##x##size##_sse2_mul, \
		.and           = v##sign##int##bits##x##size##_sse2_and, \
		.or            = v##sign##int##bits##x##size##_sse2_or, \
		.xor           = v##sign##int##bits##x##size##_sse2_xor, \
		.lshift        = v##sign##int##bits##x##size##_sse2_lshift, \
		.rshift        = v##sign##int##bits##x##size##_sse2_rshift, \
		.lrshift       = v##sign##int##bits##x##size##_sse2_lrshift, \
	};

#define VEC_SSE2_DEFINE_OPERATIONS(bits, size) \
	VEC_SSE2_DEFINE_OPERATIONS_SIGN( , bits, size) \
	VEC_SSE2_DEFINE_OPERATIONS_SIGN(u, bits, size)

// SSE is *only* 128-bit
VEC_SSE2_DEFINE_OPERATIONS(8, 16)
VEC_SSE2_DEFINE_OPERATIONS(16, 8)
VEC_SSE2_DEFINE_OPERATIONS(32, 4)
VEC_SSE2_DEFINE_OPERATIONS(64, 2)
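
/* For reference, each invocation above stamps out a family like the 32x4 one,
 * once with the empty (signed) prefix and once with the "u" (unsigned) prefix:
 *
 *     static vint32x4 vint32x4_sse2_add(vint32x4 vec1, vint32x4 vec2);
 *     static vint32x4 vint32x4_sse2_mul(vint32x4 vec1, vint32x4 vec2);
 *     ...
 *     static vint32x4_impl vint32x4_impl_sse2 = { ... };
 *
 * plus the matching vuint32x4 versions. */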

#undef VEC_SSE2_DEFINE_OPERATIONS
#undef VEC_SSE2_DEFINE_OPERATIONS_SIGN
#undef VEC_SSE2_MUL_8x16
#undef VEC_SSE2_MUL_16x8
#undef VEC_SSE2_MUL_32x4
#undef VEC_SSE2_MUL_64x2
#undef VEC_SSE2_OPERATION_8x16
#undef VEC_SSE2_LSHIFT_8x16
#undef VEC_SSE2_LSHIFT_16x8
#undef VEC_SSE2_LSHIFT_32x4
#undef VEC_SSE2_LSHIFT_64x2
#undef VEC_SSE2_RSHIFT_8x16
#undef VEC_SSE2_RSHIFT_16x8
#undef VEC_SSE2_RSHIFT_32x4
#undef VEC_SSE2_aRSHIFT_64x2
#undef VEC_SSE2_lRSHIFT_64x2
#undef VEC_SSE2_RSHIFT_64x2

#endif /* VEC_IMPL_X86_SSE2_H_ */