/**
 * vec - a tiny SIMD vector library in plain C99
 * 
 * Copyright (c) 2024 Paper
 * 
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * 
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 * 
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
**/

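/* This header implements vec's operations on top of SSE2's __m128i type.
 * Anything SSE2 cannot express directly (splat, divide, shifts, average,
 * and the unsigned/64-bit comparisons) falls back to the generic
 * implementations. The VEC_DECL_* and VEC_GENERIC_* macros used below are
 * expected to be supplied by the main vec header that includes this file. */
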
#include <emmintrin.h>

#define VEC_SSE2_ALIGNMENT 16

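/* SSE2 has no 8-bit multiply, so it is synthesized from 16-bit multiplies:
 * the even bytes are multiplied in place (the low byte of each 16-bit
 * product is exact mod 256), the odd bytes are shifted down and multiplied,
 * and the two halves are merged back together. Per 16-bit lane, roughly:
 *
 *   even = (uint8_t)(lane1 * lane2);
 *   odd  = (uint8_t)((lane1 >> 8) * (lane2 >> 8));
 *   dst  = (odd << 8) | even;
 */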
#define VEC_SSE2_MUL_8x16(sign) \
	VEC_DECL_MUL(sign, 8, 16) \
	{ \
		/* multiply the even and odd bytes separately */ \
		__m128i dst_even = _mm_mullo_epi16(vec1, vec2); \
		__m128i dst_odd = _mm_mullo_epi16(_mm_srli_epi16(vec1, 8), _mm_srli_epi16(vec2, 8)); \
	\
		/* repack; mask off the garbage high byte of each even product */ \
		return _mm_or_si128( \
			_mm_slli_epi16(dst_odd, 8), \
			_mm_srli_epi16(_mm_slli_epi16(dst_even, 8), 8) \
		); \
	}

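/* _mm_mullo_epi16 keeps the low 16 bits of each 32-bit product, which is
 * the correct truncating result for signed and unsigned inputs alike. */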
#define VEC_SSE2_MUL_16x8(sign) \
	VEC_DECL_MUL(sign, 16, 8) \
	{ \
		/* we have a real instruction for this */ \
		return _mm_mullo_epi16(vec1, vec2); \
	}

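/* SSE2 also has no 32-bit multiply (_mm_mullo_epi32 arrived with SSE4.1).
 * The trick below runs two widening _mm_mul_epu32 multiplies, one on the
 * even lanes and one on the odd lanes, then interleaves the low 32 bits
 * of each 64-bit product back into order; the truncated low halves are
 * the same for signed and unsigned operands. */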
#define VEC_SSE2_MUL_32x4(sign) \
	VEC_DECL_MUL(sign, 32, 4) \
	{ \
		/* this was stolen from... somewhere :) */ \
		__m128i a13    = _mm_shuffle_epi32(vec1, 0xF5);     /* (-,a3,-,a1) */ \
		__m128i b13    = _mm_shuffle_epi32(vec2, 0xF5);     /* (-,b3,-,b1) */ \
		__m128i prod02 = _mm_mul_epu32(vec1, vec2);         /* (-,a2*b2,-,a0*b0) */ \
		__m128i prod13 = _mm_mul_epu32(a13, b13);           /* (-,a3*b3,-,a1*b1) */ \
		__m128i prod01 = _mm_unpacklo_epi32(prod02,prod13); /* (-,-,a1*b1,a0*b0) */ \
		__m128i prod23 = _mm_unpackhi_epi32(prod02,prod13); /* (-,-,a3*b3,a2*b2) */ \
		return _mm_unpacklo_epi64(prod01, prod23);          /* (ab3,ab2,ab1,ab0) */ \
	}

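/* _mm_mul_epu32 only provides a 32x32 -> 64 product, so the 64-bit multiply
 * is schoolbook multiplication on 32-bit halves. With a = a_hi:a_lo and
 * b = b_hi:b_lo, the low 64 bits of a*b are
 *
 *   a_lo*b_lo + ((a_lo*b_hi + a_hi*b_lo) << 32)
 *
 * and the a_hi*b_hi term is dropped, since it only affects bits >= 64. */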
#define VEC_SSE2_MUL_64x2(sign) \
	VEC_DECL_MUL(sign, 64, 2) \
	{ \
		__m128i ac = _mm_mul_epu32(vec1, vec2); /* ac = (vec1 & UINT32_MAX) * (vec2 & UINT32_MAX); */ \
		__m128i b  = _mm_srli_epi64(vec1, 32);  /* b = vec1 >> 32; */ \
		__m128i bc = _mm_mul_epu32(b, vec2);    /* bc = b * (vec2 & UINT32_MAX); */ \
		__m128i d  = _mm_srli_epi64(vec2, 32);  /* d = vec2 >> 32; */ \
		__m128i ad = _mm_mul_epu32(vec1, d);    /* ad = (vec1 & UINT32_MAX) * d; */ \
		__m128i hi = _mm_add_epi64(bc, ad);     /* hi = bc + ad; */ \
		hi = _mm_slli_epi64(hi, 32);            /* hi <<= 32; */ \
		return _mm_add_epi64(hi, ac);           /* return ac + hi; */ \
	}

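/* Define the full operation set for one element width/count. Loads, stores,
 * add/sub, the bitwise ops, and multiply map onto SSE2 directly; splat,
 * divide, shifts, and average have no SSE2 equivalent and use the generic
 * fallbacks. */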
#define VEC_DEFINE_OPERATIONS(sign, csign, bits, size) \
	VEC_DECL_LOAD_ALIGNED(sign, bits, size) \
	{ \
		return _mm_load_si128((const __m128i *)in); \
	} \
	\
	VEC_DECL_LOAD(sign, bits, size) \
	{ \
		return _mm_loadu_si128((const __m128i *)in); \
	} \
	\
	VEC_DECL_STORE_ALIGNED(sign, bits, size) \
	{ \
		_mm_store_si128((__m128i *)out, vec); \
	} \
	\
	VEC_DECL_STORE(sign, bits, size) \
	{ \
		_mm_storeu_si128((__m128i *)out, vec); \
	} \
	\
	VEC_DECL_ADD(sign, bits, size) \
	{ \
		return _mm_add_epi##bits(vec1, vec2); \
	} \
	\
	VEC_DECL_SUB(sign, bits, size) \
	{ \
		return _mm_sub_epi##bits(vec1, vec2); \
	} \
	\
	VEC_DECL_AND(sign, bits, size) \
	{ \
		return _mm_and_si128(vec1, vec2); \
	} \
	\
	VEC_DECL_OR(sign, bits, size) \
	{ \
		return _mm_or_si128(vec1, vec2); \
	} \
	\
	VEC_DECL_XOR(sign, bits, size) \
	{ \
		return _mm_xor_si128(vec1, vec2); \
	} \
	\
	VEC_SSE2_MUL_##bits##x##size(sign) \
	\
	VEC_GENERIC_SPLAT(sign, csign, bits, size) \
	VEC_GENERIC_DIVIDE(sign, csign, bits, size) \
	VEC_GENERIC_SHIFTS(sign, csign, bits, size) \
	VEC_GENERIC_AVG(sign, bits, size)

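/* SSE2 only provides ordered comparisons for signed 8/16/32-bit elements
 * (and no 64-bit comparisons at all), so only the signed types get native
 * cmplt/cmpgt here; >= and <= are derived by the generic macro, and the
 * unsigned and 64-bit types use VEC_GENERIC_COMPARISONS below. */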
#define VEC_DEFINE_COMPARISONS_SIGNED(bits, size) \
	VEC_DECL_CMPEQ(, bits, size) \
	{ \
		return _mm_cmpeq_epi##bits(vec1, vec2); \
	} \
	VEC_DECL_CMPLT(, bits, size) \
	{ \
		return _mm_cmplt_epi##bits(vec1, vec2); \
	} \
	VEC_DECL_CMPGT(, bits, size) \
	{ \
		return _mm_cmpgt_epi##bits(vec1, vec2); \
	} \
	VEC_GENERIC_THAN_OR_EQUAL(, bits, size)

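/* Note the reversed argument order in the CONSTANT macros: _mm_setr_epi*
 * fills the vector starting from the lowest lane, so passing the arguments
 * backwards is equivalent to _mm_set_epi* and presumably yields the lane
 * order the rest of vec expects. */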
#ifndef VEC_VUINT8X16
# define VEC_VUINT8X16
typedef __m128i vuint8x16;
# define VUINT8x16_CONSTANT(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) \
	(_mm_setr_epi8(p, o, n, m, l, k, j, i, h, g, f, e, d, c, b, a))
# define VUINT8x16_ALIGNMENT VEC_SSE2_ALIGNMENT
VEC_DEFINE_OPERATIONS(u, U, 8, 16)
VEC_GENERIC_COMPARISONS(u, U, 8, 16)
#endif

#ifndef VEC_VUINT16X8
# define VEC_VUINT16X8
typedef __m128i vuint16x8;
# define VUINT16x8_CONSTANT(a, b, c, d, e, f, g, h) \
	(_mm_setr_epi16(h, g, f, e, d, c, b, a))
# define VUINT16x8_ALIGNMENT VEC_SSE2_ALIGNMENT
VEC_DEFINE_OPERATIONS(u, U, 16, 8)
VEC_GENERIC_COMPARISONS(u, U, 16, 8)
#endif

#ifndef VEC_VUINT32X4
# define VEC_VUINT32X4
typedef __m128i vuint32x4;
# define VUINT32x4_CONSTANT(a, b, c, d) \
	(_mm_setr_epi32(d, c, b, a))
# define VUINT32x4_ALIGNMENT VEC_SSE2_ALIGNMENT
VEC_DEFINE_OPERATIONS(u, U, 32, 4)
VEC_GENERIC_COMPARISONS(u, U, 32, 4)
#endif

#ifndef VEC_VUINT64X2
# define VEC_VUINT64X2
typedef __m128i vuint64x2;
VEC_FUNC_KEYWORDS vuint64x2 VUINT64x2_CONSTANT(uint64_t a, uint64_t b)
{
	/* build each 64-bit lane out of two 32-bit halves; the casts silence
	 * implicit-truncation warnings */
	return _mm_setr_epi32((int)b, (int)(b >> 32), (int)a, (int)(a >> 32));
}
# define VUINT64x2_ALIGNMENT VEC_SSE2_ALIGNMENT
VEC_DEFINE_OPERATIONS(u, U, 64, 2)
VEC_GENERIC_COMPARISONS(u, U, 64, 2)
#endif

#ifndef VEC_VINT8X16
# define VEC_VINT8X16
typedef __m128i vint8x16;
# define VINT8x16_CONSTANT(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) \
	(_mm_setr_epi8(p, o, n, m, l, k, j, i, h, g, f, e, d, c, b, a))
# define VINT8x16_ALIGNMENT VEC_SSE2_ALIGNMENT
VEC_DEFINE_OPERATIONS(, , 8, 16)
VEC_DEFINE_COMPARISONS_SIGNED(8, 16)
#endif

#ifndef VEC_VINT16X8
# define VEC_VINT16X8
typedef __m128i vint16x8;
# define VINT16x8_CONSTANT(a, b, c, d, e, f, g, h) \
	(_mm_setr_epi16(h, g, f, e, d, c, b, a))
# define VINT16x8_ALIGNMENT VEC_SSE2_ALIGNMENT
VEC_DEFINE_OPERATIONS(, , 16, 8)
VEC_DEFINE_COMPARISONS_SIGNED(16, 8)
#endif

#ifndef VEC_VINT32X4
# define VEC_VINT32X4
typedef __m128i vint32x4;
# define VINT32x4_CONSTANT(a, b, c, d) \
	(_mm_setr_epi32(d, c, b, a))
# define VINT32x4_ALIGNMENT VEC_SSE2_ALIGNMENT
VEC_DEFINE_OPERATIONS(, , 32, 4)
VEC_DEFINE_COMPARISONS_SIGNED(32, 4)
#endif

#ifndef VEC_VINT64X2
# define VEC_VINT64X2
typedef __m128i vint64x2;
# define VINT64x2_ALIGNMENT VEC_SSE2_ALIGNMENT
VEC_FUNC_KEYWORDS vint64x2 VINT64x2_CONSTANT(int64_t a, int64_t b)
{
	/* as above, but via vec_rshift for the right shift of signed values */
	return _mm_setr_epi32((int)b, (int)vec_rshift(b, 32), (int)a, (int)vec_rshift(a, 32));
}
VEC_DEFINE_OPERATIONS(, , 64, 2)
VEC_GENERIC_COMPARISONS(, , 64, 2)
#endif

#undef VEC_DEFINE_OPERATIONS
#undef VEC_DEFINE_COMPARISONS_SIGNED

/* multiply */
#undef VEC_SSE2_MUL_8x16
#undef VEC_SSE2_MUL_16x8
#undef VEC_SSE2_MUL_32x4
#undef VEC_SSE2_MUL_64x2
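
/* Hypothetical usage sketch -- not taken from this header. It assumes the
 * VEC_DECL_* macros expand to functions named like vuint32x4_add() and
 * vuint32x4_store_aligned(); check the main vec header for the real names.
 *
 *   vuint32x4 a = VUINT32x4_CONSTANT(1, 2, 3, 4);
 *   vuint32x4 b = VUINT32x4_CONSTANT(5, 6, 7, 8);
 *   uint32_t out[4]; // must be VUINT32x4_ALIGNMENT-aligned for this store
 *   vuint32x4_store_aligned(vuint32x4_add(a, b), out); // lane-wise sums
 */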