/**
 * vec - a tiny SIMD vector library in plain C99
 * 
 * Copyright (c) 2024 Paper
 * 
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * 
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 * 
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
**/

#include <emmintrin.h>

#include <string.h> /* memcpy */

#define VEC_SSE2_ALIGNMENT 16
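
/* SSE2's aligned load/store forms (movdqa) fault on addresses that are not
 * 16-byte aligned, which is what this constant communicates to callers.  A
 * minimal usage sketch, assuming a C11-style alignment specifier is
 * available (buffer and variable names are illustrative only):
 *
 *     _Alignas(VEC_SSE2_ALIGNMENT) uint32_t buf[4] = {1, 2, 3, 4};
 *     __m128i v = _mm_load_si128((const __m128i *)buf);
 */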

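/* VEC_DEFINE_OPERATIONS fills in the operation set for one lane
 * configuration.  Loads, stores, add/sub and the bitwise ops map directly
 * onto SSE2 intrinsics; splat, divide, shifts and average fall back to the
 * generic implementations.  Multiplication is only *declared* here, since
 * no single SSE2 multiply instruction covers every element width -- each
 * type supplies its own definition below. */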
#define VEC_DEFINE_OPERATIONS(sign, bits, size) \
	VEC_DECL_LOAD_ALIGNED(sign, bits, size) \
	{ \
		return _mm_load_si128((const __m128i *)in); \
	} \
	\
	VEC_DECL_LOAD(sign, bits, size) \
	{ \
		return _mm_loadu_si128((const __m128i *)in); \
	} \
	\
	VEC_DECL_STORE_ALIGNED(sign, bits, size) \
	{ \
		_mm_store_si128((__m128i *)out, vec); \
	} \
	\
	VEC_DECL_STORE(sign, bits, size) \
	{ \
		_mm_storeu_si128((__m128i *)out, vec); \
	} \
	\
	VEC_DECL_ADD(sign, bits, size) \
	{ \
		return _mm_add_epi##bits(vec1, vec2); \
	} \
	\
	VEC_DECL_SUB(sign, bits, size) \
	{ \
		return _mm_sub_epi##bits(vec1, vec2); \
	} \
	\
	VEC_DECL_AND(sign, bits, size) \
	{ \
		return _mm_and_si128(vec1, vec2); \
	} \
	\
	VEC_DECL_OR(sign, bits, size) \
	{ \
		return _mm_or_si128(vec1, vec2); \
	} \
	\
	VEC_DECL_XOR(sign, bits, size) \
	{ \
		return _mm_xor_si128(vec1, vec2); \
	} \
	\
	VEC_GENERIC_SPLAT(sign, bits, size) \
	VEC_GENERIC_DIVIDE(sign, bits, size) \
	VEC_GENERIC_SHIFTS(sign, bits, size) \
	VEC_DECL_MUL(sign, bits, size); \
	VEC_GENERIC_AVG(sign, bits, size)

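/* SSE2's ordered integer comparisons (pcmpgt/pcmplt) are signed-only, so
 * native compare definitions exist only for the signed types; the unsigned
 * types use VEC_GENERIC_COMPARISONS instead. */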
#define VEC_DEFINE_COMPARISONS_SIGNED(bits, size) \
	VEC_DECL_CMPEQ(, bits, size) \
	{ \
		return _mm_cmpeq_epi##bits(vec1, vec2); \
	} \
	VEC_DECL_CMPLT(, bits, size) \
	{ \
		return _mm_cmplt_epi##bits(vec1, vec2); \
	} \
	VEC_DECL_CMPGT(, bits, size) \
	{ \
		return _mm_cmpgt_epi##bits(vec1, vec2); \
	} \
	VEC_GENERIC_THAN_OR_EQUAL(, bits, size)

#ifndef VEC_VUINT8X16
# define VEC_VUINT8X16
typedef __m128i vuint8x16;
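/* note the argument order: _mm_setr_epi8 assigns its first argument to the
 * lowest element, so reversing here makes the CONSTANT macro take its
 * arguments from the highest lane down to the lowest -- the same order
 * _mm_set_epi8 uses.  The other CONSTANT macros below follow the same
 * convention. */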
# define VUINT8x16_CONSTANT(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) \
	(_mm_setr_epi8(p, o, n, m, l, k, j, i, h, g, f, e, d, c, b, a))
VEC_DEFINE_OPERATIONS(u, 8, 16)
VEC_GENERIC_COMPARISONS(u, 8, 16)
# define VUINT8x16_ALIGNMENT VEC_SSE2_ALIGNMENT
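/* SSE2 has no 8-bit multiply, so the even and odd bytes are multiplied
 * separately in 16-bit lanes and the low byte of each 16-bit product is
 * recombined.  Equivalent scalar logic, for reference (names illustrative):
 *
 *     for (i = 0; i < 16; i++)
 *         out[i] = (uint8_t)(in1[i] * in2[i]);
 */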
VEC_DECL_MUL(u, 8, 16)
{
	/* unpack and multiply */ \
	__m128i dst_even = _mm_mullo_epi16(vec1, vec2);
	__m128i dst_odd = _mm_mullo_epi16(_mm_srli_epi16(vec1, 8), _mm_srli_epi16(vec2, 8));

	/* repack */ \
	return _mm_or_si128(_mm_slli_epi16(dst_odd, 8), _mm_srli_epi16(_mm_slli_epi16(dst_even, 8), 8));
}
#endif

#ifndef VEC_VUINT16X8
# define VEC_VUINT16X8
typedef __m128i vuint16x8;
# define VUINT16x8_CONSTANT(a, b, c, d, e, f, g, h) \
	(_mm_setr_epi16(h, g, f, e, d, c, b, a))
VEC_DEFINE_OPERATIONS(u, 16, 8)
VEC_GENERIC_COMPARISONS(u, 16, 8)
# define VUINT16x8_ALIGNMENT VEC_SSE2_ALIGNMENT
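/* _mm_mullo_epi16 keeps the low 16 bits of each 32-bit product, which is
 * exactly truncating (modular) multiplication, so it serves unsigned and
 * signed lanes alike. */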
VEC_DECL_MUL(u, 16, 8)
{
	return _mm_mullo_epi16(vec1, vec2);
}
#endif

#ifndef VEC_VUINT32X4
# define VEC_VUINT32X4
typedef __m128i vuint32x4;
# define VUINT32x4_CONSTANT(a, b, c, d) \
	(_mm_setr_epi32(d, c, b, a))
VEC_DEFINE_OPERATIONS(u, 32, 4)
VEC_GENERIC_COMPARISONS(u, 32, 4)
# define VUINT32x4_ALIGNMENT VEC_SSE2_ALIGNMENT
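/* SSE2's only 32-bit multiply, _mm_mul_epu32, multiplies just the even
 * elements (widening to 64 bits), so the odd elements are shuffled into
 * even position, both sets of products are taken, and the low 32-bit
 * halves are interleaved back into place. */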
VEC_DECL_MUL(u, 32, 4)
{
	/* this was stolen from... somewhere :) */
	__m128i a13    = _mm_shuffle_epi32(vec1, 0xF5);     // (-,a3,-,a1)
	__m128i b13    = _mm_shuffle_epi32(vec2, 0xF5);     // (-,b3,-,b1)
	__m128i prod02 = _mm_mul_epu32(vec1, vec2);         // (-,a2*b2,-,a0*b0)
	__m128i prod13 = _mm_mul_epu32(a13, b13);           // (-,a3*b3,-,a1*b1)
	__m128i prod01 = _mm_unpacklo_epi32(prod02,prod13); // (-,-,a1*b1,a0*b0) 
	__m128i prod23 = _mm_unpackhi_epi32(prod02,prod13); // (-,-,a3*b3,a2*b2)
	return _mm_unpacklo_epi64(prod01, prod23);          // (ab3,ab2,ab1,ab0)
}
#endif

#ifndef VEC_VUINT64X2
# define VEC_VUINT64X2
typedef __m128i vuint64x2;
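/* a function rather than a macro, and built from 32-bit pieces --
 * presumably because no SSE2 set intrinsic taking 64-bit halves is
 * portable across older compilers. */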
static inline VEC_ALWAYS_INLINE vuint64x2 VUINT64x2_CONSTANT(uint64_t a, uint64_t b)
{
	return _mm_setr_epi32((int)b, (int)(b >> 32), (int)a, (int)(a >> 32));
}
VEC_DEFINE_OPERATIONS(u, 64, 2)
VEC_GENERIC_COMPARISONS(u, 64, 2)
# define VUINT64x2_ALIGNMENT VEC_SSE2_ALIGNMENT
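/* schoolbook multiplication on 32-bit digits: writing vec1 = (b << 32) | a
 * and vec2 = (d << 32) | c per lane, the low 64 bits of the product are
 *
 *     a*c + ((a*d + b*c) << 32)
 *
 * which needs nothing beyond the 32x32 -> 64 _mm_mul_epu32 primitive. */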
VEC_DECL_MUL(u, 64, 2)
{
	/* ac = (vec1 & 0xFFFFFFFF) * (vec2 & 0xFFFFFFFF); */
	__m128i ac = _mm_mul_epu32(vec1, vec2);

	/* b = vec1 >> 32; */
	__m128i b = _mm_srli_epi64(vec1, 32);

	/* bc = b * (vec2 & 0xFFFFFFFF); */
	__m128i bc = _mm_mul_epu32(b, vec2);

	/* d = vec2 >> 32; */
	__m128i d = _mm_srli_epi64(vec2, 32);

	/* ad = (vec1 & 0xFFFFFFFF) * d; */
	__m128i ad = _mm_mul_epu32(vec1, d);

	/* high = bc + ad; */
	__m128i high = _mm_add_epi64(bc, ad);

	/* high <<= 32; */
	high = _mm_slli_epi64(high, 32);

	/* return ac + high; */
	return _mm_add_epi64(high, ac);
}
#endif

#ifndef VEC_VINT8X16
# define VEC_VINT8X16
typedef __m128i vint8x16;
# define VINT8x16_CONSTANT(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) \
	(_mm_setr_epi8(p, o, n, m, l, k, j, i, h, g, f, e, d, c, b, a))
VEC_DEFINE_OPERATIONS(, 8, 16)
VEC_DEFINE_COMPARISONS_SIGNED(8, 16)
# define VINT8x16_ALIGNMENT VEC_SSE2_ALIGNMENT
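/* same even/odd trick as the unsigned variant above; the low 8 bits of a
 * product do not depend on the operands' signedness. */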
VEC_DECL_MUL(, 8, 16)
{
	/* unpack and multiply */
	__m128i dst_even = _mm_mullo_epi16(vec1, vec2);
	__m128i dst_odd = _mm_mullo_epi16(_mm_srli_epi16(vec1, 8), _mm_srli_epi16(vec2, 8));

	/* repack */
	return _mm_or_si128(_mm_slli_epi16(dst_odd, 8), _mm_srli_epi16(_mm_slli_epi16(dst_even, 8), 8));
}
#endif

#ifndef VEC_VINT16X8
# define VEC_VINT16X8
typedef __m128i vint16x8;
# define VINT16x8_CONSTANT(a, b, c, d, e, f, g, h) \
	(_mm_setr_epi16(h, g, f, e, d, c, b, a))
VEC_DEFINE_OPERATIONS(, 16, 8)
VEC_DEFINE_COMPARISONS_SIGNED(16, 8)
# define VINT16x8_ALIGNMENT VEC_SSE2_ALIGNMENT
VEC_DECL_MUL(, 16, 8)
{
	return _mm_mullo_epi16(vec1, vec2);
}
#endif

#ifndef VEC_VINT32X4
# define VEC_VINT32X4
typedef __m128i vint32x4;
# define VINT32x4_CONSTANT(a, b, c, d) \
	(_mm_setr_epi32(d, c, b, a))
VEC_DEFINE_OPERATIONS(, 32, 4)
VEC_DEFINE_COMPARISONS_SIGNED(32, 4)
# define VINT32x4_ALIGNMENT VEC_SSE2_ALIGNMENT
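/* identical to the unsigned version: truncating multiplication is
 * sign-agnostic, so _mm_mul_epu32 still yields the correct low 32 bits. */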
VEC_DECL_MUL(, 32, 4)
{
	__m128i a13    = _mm_shuffle_epi32(vec1, 0xF5);     // (-,a3,-,a1)
	__m128i b13    = _mm_shuffle_epi32(vec2, 0xF5);     // (-,b3,-,b1)
	__m128i prod02 = _mm_mul_epu32(vec1, vec2);         // (-,a2*b2,-,a0*b0)
	__m128i prod13 = _mm_mul_epu32(a13, b13);           // (-,a3*b3,-,a1*b1)
	__m128i prod01 = _mm_unpacklo_epi32(prod02,prod13); // (-,-,a1*b1,a0*b0) 
	__m128i prod23 = _mm_unpackhi_epi32(prod02,prod13); // (-,-,a3*b3,a2*b2) 
	return _mm_unpacklo_epi64(prod01, prod23);          // (ab3,ab2,ab1,ab0)
}
#endif

#ifndef VEC_VINT64X2
# define VEC_VINT64X2
typedef __m128i vint64x2;
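/* vec_rshift is used here instead of a plain >>, presumably to sidestep the
 * implementation-defined behavior of right-shifting a negative value. */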
static inline VEC_ALWAYS_INLINE vint64x2 VINT64x2_CONSTANT(int64_t a, int64_t b)
{
	return _mm_setr_epi32((int)b, (int)vec_rshift(b, 32), (int)a, (int)vec_rshift(a, 32));
}
VEC_DEFINE_OPERATIONS(, 64, 2)
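/* no 64-bit integer compares in SSE2 (pcmpeqq/pcmpgtq arrived with SSE4),
 * so even the signed 64x2 type takes the generic comparison path */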
VEC_GENERIC_COMPARISONS(, 64, 2)
# define VINT64x2_ALIGNMENT VEC_SSE2_ALIGNMENT
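/* the low 64 bits of a product are likewise sign-agnostic, so this is the
 * same routine as the unsigned version above */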
VEC_DECL_MUL(, 64, 2)
{
	/* ac = (vec1 & 0xFFFFFFFF) * (vec2 & 0xFFFFFFFF); */
	__m128i ac = _mm_mul_epu32(vec1, vec2);

	/* b = vec1 >> 32; */
	__m128i b = _mm_srli_epi64(vec1, 32);

	/* bc = b * (vec2 & 0xFFFFFFFF); */
	__m128i bc = _mm_mul_epu32(b, vec2);

	/* d = vec2 >> 32; */
	__m128i d = _mm_srli_epi64(vec2, 32);

	/* ad = (vec1 & 0xFFFFFFFF) * d; */
	__m128i ad = _mm_mul_epu32(vec1, d);

	/* high = bc + ad; */
	__m128i high = _mm_add_epi64(bc, ad);

	/* high <<= 32; */
	high = _mm_slli_epi64(high, 32);

	/* return ac + high; */
	return _mm_add_epi64(high, ac);
}
#endif

#undef VEC_DEFINE_OPERATIONS
#undef VEC_DEFINE_COMPARISONS_SIGNED
#undef VEC_SSE2_8x16_SHIFT
#undef VEC_SSE2_16x8_SHIFT