/**
 * vec - a tiny SIMD vector library in plain C99
 * 
 * Copyright (c) 2024 Paper
 * 
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * 
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 * 
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
**/

#include <emmintrin.h>
#include <stdint.h> /* intN_t/uintN_t lane types */
#include <string.h> /* memcpy */
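
/* VEC_DEFINE_OPERATIONS expands to the basic load/store/add/sub functions for
 * one vector type; e.g. VEC_DEFINE_OPERATIONS(u, 16, 8) defines vuint16x8_load,
 * vuint16x8_store, vuint16x8_add, and vuint16x8_sub over uint16_t lanes.
 * Multiplication and splat need type-specific code, so each block below spells
 * them out by hand. */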

#define VEC_DEFINE_OPERATIONS(sign, bits, size) \
	static inline VEC_ALWAYS_INLINE v##sign##int##bits##x##size v##sign##int##bits##x##size##_load(const sign##int##bits##_t in[size]) \
	{ \
		return _mm_loadu_si128((const __m128i *)in); \
	} \
	\
	static inline VEC_ALWAYS_INLINE void v##sign##int##bits##x##size##_store(v##sign##int##bits##x##size vec, sign##int##bits##_t out[size]) \
	{ \
		memcpy(out, &vec, sizeof(vec)); \
	} \
	\
	static inline VEC_ALWAYS_INLINE v##sign##int##bits##x##size v##sign##int##bits##x##size##_add(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		return _mm_add_epi##bits(vec1, vec2); \
	} \
	\
	static inline VEC_ALWAYS_INLINE v##sign##int##bits##x##size v##sign##int##bits##x##size##_sub(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		return _mm_sub_epi##bits(vec1, vec2); \
	}

#ifndef VEC_VINT8X16
# define VEC_VINT8X16
typedef __m128i vint8x16;
# define VINT8x16_CONSTANT(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) \
	(_mm_setr_epi8(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p))
VEC_DEFINE_OPERATIONS(, 8, 16)
# define VINT8x16_ALIGNED 1
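/* SSE2 has no 8-bit multiply, so emulate it with 16-bit multiplies: the even
 * bytes already sit in the low half of each 16-bit lane, and shifting right
 * by 8 brings the odd bytes down. _mm_mullo_epi16 keeps the low 16 bits of
 * each product, whose low byte is exactly the wrapped 8-bit result, so the
 * two halves can be masked back together. */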
static inline VEC_ALWAYS_INLINE vint8x16 vint8x16_mul(vint8x16 vec1, vint8x16 vec2)
{
	// unpack and multiply
	__m128i dst_even = _mm_mullo_epi16(vec1, vec2);
	__m128i dst_odd = _mm_mullo_epi16(_mm_srli_epi16(vec1, 8), _mm_srli_epi16(vec2, 8));

	// repack
	return _mm_or_si128(_mm_slli_epi16(dst_odd, 8), _mm_srli_epi16(_mm_slli_epi16(dst_even, 8), 8));
}
static inline VEC_ALWAYS_INLINE vint8x16 vint8x16_splat(int8_t c)
{
	return VINT8x16_CONSTANT(c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c);
}
#endif

#ifndef VEC_VINT16X8
# define VEC_VINT16X8
typedef __m128i vint16x8;
# define VINT16x8_CONSTANT(a, b, c, d, e, f, g, h) \
	(_mm_setr_epi16(a, b, c, d, e, f, g, h))
VEC_DEFINE_OPERATIONS(, 16, 8)
# define VINT16x8_ALIGNED 1
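/* 16 bits is the native SSE2 integer multiply width, so no emulation is needed. */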
static inline VEC_ALWAYS_INLINE vint16x8 vint16x8_mul(vint16x8 vec1, vint16x8 vec2)
{
	return _mm_mullo_epi16(vec1, vec2);
}
static inline VEC_ALWAYS_INLINE vint16x8 vint16x8_splat(int16_t c)
{
	return VINT16x8_CONSTANT(c, c, c, c, c, c, c, c);
}
#endif

#ifndef VEC_VINT32X4
# define VEC_VINT32X4
typedef __m128i vint32x4;
# define VINT32x4_CONSTANT(a, b, c, d) \
	(_mm_setr_epi32(a, b, c, d))
VEC_DEFINE_OPERATIONS(, 32, 4)
# define VINT32x4_ALIGNED 1
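/* SSE2 has no 32-bit multiply either (_mm_mullo_epi32 arrived with SSE4.1).
 * _mm_mul_epu32 multiplies lanes 0 and 2 into two 64-bit products, so one
 * call covers the even elements and a second call on shuffled inputs covers
 * the odd ones; the unpacks then interleave the low 32 bits of each product
 * back into place. The unsigned multiply is fine here because the low 32
 * bits of a product do not depend on signedness. */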
static inline VEC_ALWAYS_INLINE vint32x4 vint32x4_mul(vint32x4 a, vint32x4 b)
{
	__m128i a13    = _mm_shuffle_epi32(a, 0xF5);        // (-,a3,-,a1)
	__m128i b13    = _mm_shuffle_epi32(b, 0xF5);        // (-,b3,-,b1)
	__m128i prod02 = _mm_mul_epu32(a, b);               // (-,a2*b2,-,a0*b0)
	__m128i prod13 = _mm_mul_epu32(a13, b13);           // (-,a3*b3,-,a1*b1)
	__m128i prod01 = _mm_unpacklo_epi32(prod02,prod13); // (-,-,a1*b1,a0*b0) 
	__m128i prod23 = _mm_unpackhi_epi32(prod02,prod13); // (-,-,a3*b3,a2*b2) 
	return _mm_unpacklo_epi64(prod01, prod23);          // (ab3,ab2,ab1,ab0)
}
static inline VEC_ALWAYS_INLINE vint32x4 vint32x4_splat(int32_t c)
{
	return VINT32x4_CONSTANT(c, c, c, c);
}
#endif

#ifndef VEC_VINT64X2
# define VEC_VINT64X2
typedef __m128i vint64x2;
static inline VEC_ALWAYS_INLINE vint64x2 VINT64x2_CONSTANT(int64_t a, int64_t b)
{
	/* element 0 holds a and element 1 holds b, matching the other _CONSTANT macros */
	return _mm_setr_epi32((int)a, (int)((uint64_t)a >> 32), (int)b, (int)((uint64_t)b >> 32));
}
VEC_DEFINE_OPERATIONS(, 64, 2)
# define VINT64x2_ALIGNED 1
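/* Schoolbook multiply from 32-bit pieces. With x = (b << 32) | a and
 * y = (d << 32) | c, x*y mod 2^64 = a*c + ((b*c + a*d) << 32); the b*d term
 * falls entirely outside the low 64 bits. _mm_mul_epu32 already ignores the
 * high 32 bits of each 64-bit lane, so no masking is needed. */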
static inline VEC_ALWAYS_INLINE vint64x2 vint64x2_mul(vint64x2 ab, vint64x2 cd)
{
	/* ac = (ab & 0xFFFFFFFF) * (cd & 0xFFFFFFFF); */
	__m128i ac = _mm_mul_epu32(ab, cd);

	/* b = ab >> 32; */
	__m128i b = _mm_srli_epi64(ab, 32);

	/* bc = b * (cd & 0xFFFFFFFF); */
	__m128i bc = _mm_mul_epu32(b, cd);

	/* d = cd >> 32; */
	__m128i d = _mm_srli_epi64(cd, 32);

	/* ad = (ab & 0xFFFFFFFF) * d; */
	__m128i ad = _mm_mul_epu32(ab, d);

	/* high = bc + ad; */
	__m128i high = _mm_add_epi64(bc, ad);

	/* high <<= 32; */
	high = _mm_slli_epi64(high, 32);

	/* return ac + high; */
	return _mm_add_epi64(high, ac);
}
static inline VEC_ALWAYS_INLINE vint64x2 vint64x2_splat(int64_t c)
{
	return VINT64x2_CONSTANT(c, c);
}
#endif
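
/* The unsigned variants below reuse the same instruction sequences as the
 * signed ones: SSE2 adds, subtracts, and low-half multiplies operate modulo
 * 2^n, which matches both two's-complement and unsigned wraparound. */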

#ifndef VEC_VUINT8X16
# define VEC_VUINT8X16
typedef __m128i vuint8x16;
# define VUINT8x16_CONSTANT(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) \
	(_mm_setr_epi8(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p))
VEC_DEFINE_OPERATIONS(u, 8, 16)
# define VUINT8x16_ALIGNED 1
static inline VEC_ALWAYS_INLINE vuint8x16 vuint8x16_mul(vuint8x16 vec1, vuint8x16 vec2)
{
	// unpack and multiply
	__m128i dst_even = _mm_mullo_epi16(vec1, vec2);
	__m128i dst_odd = _mm_mullo_epi16(_mm_srli_epi16(vec1, 8), _mm_srli_epi16(vec2, 8));

	// repack
	return _mm_or_si128(_mm_slli_epi16(dst_odd, 8), _mm_srli_epi16(_mm_slli_epi16(dst_even, 8), 8));
}
static inline VEC_ALWAYS_INLINE vuint8x16 vuint8x16_splat(uint8_t c)
{
	return VUINT8x16_CONSTANT(c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c);
}
#endif

#ifndef VEC_VUINT16X8
# define VEC_VUINT16X8
typedef __m128i vuint16x8;
# define VUINT16x8_CONSTANT(a, b, c, d, e, f, g, h) \
	(_mm_setr_epi16(a, b, c, d, e, f, g, h))
VEC_DEFINE_OPERATIONS(u, 16, 8)
# define VUINT16x8_ALIGNED 1
static inline VEC_ALWAYS_INLINE vuint16x8 vuint16x8_mul(vuint16x8 vec1, vuint16x8 vec2)
{
	return _mm_mullo_epi16(vec1, vec2);
}
static inline VEC_ALWAYS_INLINE vuint16x8 vuint16x8_splat(uint16_t c)
{
	return VUINT16x8_CONSTANT(c, c, c, c, c, c, c, c);
}
#endif

#ifndef VEC_VUINT32X4
# define VEC_VUINT32X4
typedef __m128i vuint32x4;
# define VUINT32x4_CONSTANT(a, b, c, d) \
	(_mm_setr_epi32(a, b, c, d))
VEC_DEFINE_OPERATIONS(u, 32, 4)
# define VUINT32x4_ALIGNED 1
static inline VEC_ALWAYS_INLINE vuint32x4 vuint32x4_mul(vuint32x4 a, vuint32x4 b)
{
	/* same even/odd _mm_mul_epu32 trick as vint32x4_mul above */
	__m128i a13    = _mm_shuffle_epi32(a, 0xF5);        // (-,a3,-,a1)
	__m128i b13    = _mm_shuffle_epi32(b, 0xF5);        // (-,b3,-,b1)
	__m128i prod02 = _mm_mul_epu32(a, b);               // (-,a2*b2,-,a0*b0)
	__m128i prod13 = _mm_mul_epu32(a13, b13);           // (-,a3*b3,-,a1*b1)
	__m128i prod01 = _mm_unpacklo_epi32(prod02,prod13); // (-,-,a1*b1,a0*b0) 
	__m128i prod23 = _mm_unpackhi_epi32(prod02,prod13); // (-,-,a3*b3,a2*b2)
	return _mm_unpacklo_epi64(prod01, prod23);          // (ab3,ab2,ab1,ab0)
}
static inline VEC_ALWAYS_INLINE vuint32x4 vuint32x4_splat(uint32_t c)
{
	return VUINT32x4_CONSTANT(c, c, c, c);
}
#endif

#ifndef VEC_VUINT64X2
# define VEC_VUINT64X2
typedef __m128i vuint64x2;
static inline VEC_ALWAYS_INLINE vuint64x2 VUINT64x2_CONSTANT(uint64_t a, uint64_t b)
{
	/* element 0 holds a and element 1 holds b, matching the other _CONSTANT macros */
	return _mm_setr_epi32((int)a, (int)(a >> 32), (int)b, (int)(b >> 32));
}
VEC_DEFINE_OPERATIONS(u, 64, 2)
# define VUINT64x2_ALIGNED 1
static inline VEC_ALWAYS_INLINE vuint64x2 vuint64x2_mul(vuint64x2 ab, vuint64x2 cd)
{
	/* ac = (ab & 0xFFFFFFFF) * (cd & 0xFFFFFFFF); */
	__m128i ac = _mm_mul_epu32(ab, cd);

	/* b = ab >> 32; */
	__m128i b = _mm_srli_epi64(ab, 32);

	/* bc = b * (cd & 0xFFFFFFFF); */
	__m128i bc = _mm_mul_epu32(b, cd);

	/* d = cd >> 32; */
	__m128i d = _mm_srli_epi64(cd, 32);

	/* ad = (ab & 0xFFFFFFFF) * d; */
	__m128i ad = _mm_mul_epu32(ab, d);

	/* high = bc + ad; */
	__m128i high = _mm_add_epi64(bc, ad);

	/* high <<= 32; */
	high = _mm_slli_epi64(high, 32);

	/* return ac + high; */
	return _mm_add_epi64(high, ac);
}
static inline VEC_ALWAYS_INLINE vuint64x2 vuint64x2_splat(uint64_t c)
{
	return VUINT64x2_CONSTANT(c, c);
}
#endif

#undef VEC_DEFINE_OPERATIONS
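
/* Illustrative usage sketch (not part of this header). It assumes an umbrella
 * header has defined VEC_ALWAYS_INLINE and included this file:
 *
 *     int32_t a[4] = {1, 2, 3, 4}, b[4] = {5, 6, 7, 8}, out[4];
 *     vint32x4 va = vint32x4_load(a);
 *     vint32x4 vb = vint32x4_load(b);
 *     vint32x4_store(vint32x4_mul(va, vb), out);
 *     // out now holds {5, 12, 21, 32}
 */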