diff utils/gengeneric.c @ 37:4b5a557aa64f

*: turns out extern is a practical joke. rewrite to be always inline again; the sample benchmark performs about 3x as well with optimizations disabled :)
author Paper <paper@tflc.us>
date Sat, 26 Apr 2025 01:04:35 -0400
parents
children fd42f9b1b95e
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/utils/gengeneric.c	Sat Apr 26 01:04:35 2025 -0400
@@ -0,0 +1,429 @@
+/**
+ * vec - a tiny SIMD vector library in plain C99
+ * 
+ * Copyright (c) 2024 Paper
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+**/
+
+/* Use this file to generate include/vec/impl/generic.h !!
+ *
+ * `gcc -o gengeneric gengeneric.c` */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
+
+/* XXX: would it be faster to unroll literally everything instead of defining everything,
+ * and then unpacking it all? */
+/* Emitted verbatim at the top of the generated generic.h: the license
+ * header, the include guard, and the macro toolbox that the per-type
+ * definitions printed later expand. Every byte here ends up in the
+ * generated file, so the string contents must not be reworded. */
+static const char *header =
	"/**\n"
	" * vec - a tiny SIMD vector library in plain C99\n"
	" * \n"
	" * Copyright (c) 2024 Paper\n"
	" * \n"
	" * Permission is hereby granted, free of charge, to any person obtaining a copy\n"
	" * of this software and associated documentation files (the \"Software\"), to deal\n"
	" * in the Software without restriction, including without limitation the rights\n"
	" * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n"
	" * copies of the Software, and to permit persons to whom the Software is\n"
	" * furnished to do so, subject to the following conditions:\n"
	" * \n"
	" * The above copyright notice and this permission notice shall be included in all\n"
	" * copies or substantial portions of the Software.\n"
	" * \n"
	" * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n"
	" * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n"
	" * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n"
	" * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n"
	" * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n"
	" * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n"
	" * SOFTWARE.\n"
	"**/\n"
	"\n"
	"/* This file is automatically generated! Do not edit it directly!\n"
	" * Edit the code that generates it in utils/gengeneric.c  --paper */\n"
	"\n"
	"#ifndef VEC_IMPL_GENERIC_H_\n"
	"#define VEC_IMPL_GENERIC_H_\n"
	"\n"
	"#include <string.h>\n"
	"\n"
	"// -----------------------------------------------------------------\n"
	"\n"
	/* Core helpers: each expands to a scalar per-lane loop over the
	 * union's .generic[] array inside the enclosing function body. */
	"#define VEC_GENERIC_OPERATION(op, sign, bits, size) \\\n"
	"	do { \\\n"
	"		int i; \\\n"
	"	\\\n"
	"		for (i = 0; i < size; i++) \\\n"
	"			vec1.generic[i] = (op); \\\n"
	"	\\\n"
	"		return vec1; \\\n"
	"	} while (0)\n"
	"\n"
	"#define VEC_GENERIC_BUILTIN_OPERATION(op, sign, bits, size) \\\n"
	"	VEC_GENERIC_OPERATION(vec1.generic[i] op vec2.generic[i], sign, bits, size)\n"
	"\n"
	"#define VEC_GENERIC_CMP(op, sign, bits, size) \\\n"
	"	VEC_GENERIC_OPERATION((vec1.generic[i] op vec2.generic[i]) ? (vec_##sign##int##bits)VEC_MAX_OF_TYPE(vec_uint##bits) : 0, sign, bits, size)\n"
	"\n"
	"/* okay, now we can do this crap: */\n"
	"\n"
	/* One VEC_GENERIC_* macro per operation; each defines one
	 * VEC_FUNC_IMPL function for the (sign, bits, size) triple. */
	"#define VEC_GENERIC_SPLAT(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_splat(vec_##sign##int##bits x) \\\n"
	"	{ \\\n"
	"		v##sign##int##bits##x##size vec; \\\n"
	"		for (int i = 0; i < size; i++) \\\n"
	"			vec.generic[i] = x; \\\n"
	"		return vec; \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_LOAD_EX(name, sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_##name(const vec_##sign##int##bits in[size]) \\\n"
	"	{ \\\n"
	"		v##sign##int##bits##x##size vec; \\\n"
	"		memcpy(&vec, in, sizeof(vec_##sign##int##bits) * size); \\\n"
	"		return vec; \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_LOAD_ALIGNED(sign, bits, size) VEC_GENERIC_LOAD_EX(load_aligned, sign, bits, size)\n"
	"#define VEC_GENERIC_LOAD(sign, bits, size) VEC_GENERIC_LOAD_EX(load, sign, bits, size)\n"
	"\n"
	"#define VEC_GENERIC_STORE_EX(name, sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL void v##sign##int##bits##x##size##_##name(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \\\n"
	"	{ \\\n"
	"		memcpy(out, &vec, sizeof(vec_##sign##int##bits) * size); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_STORE_ALIGNED(sign, bits, size) VEC_GENERIC_STORE_EX(store_aligned, sign, bits, size)\n"
	"#define VEC_GENERIC_STORE(sign, bits, size) VEC_GENERIC_STORE_EX(store, sign, bits, size)\n"
	"\n"
	"#define VEC_GENERIC_ADD(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_add(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		VEC_GENERIC_BUILTIN_OPERATION(+, sign, bits, size); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_SUB(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_sub(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		VEC_GENERIC_BUILTIN_OPERATION(-, sign, bits, size); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_MUL(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_mul(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		VEC_GENERIC_BUILTIN_OPERATION(*, sign, bits, size); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_DIV(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_div(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		VEC_GENERIC_OPERATION(vec2.generic[i] ? (vec1.generic[i] / vec2.generic[i]) : 0, sign, bits, size); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_AVG(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_avg(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		for (int i = 0; i < size; i++) \\\n"
	"			vec1.generic[i] = vec_##sign##avg(vec1.generic[i], vec2.generic[i]); \\\n"
	"	\\\n"
	"		return vec1; \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_AND(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_and(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		VEC_GENERIC_BUILTIN_OPERATION(&, sign, bits, size); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_OR(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_or(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		VEC_GENERIC_BUILTIN_OPERATION(|, sign, bits, size); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_XOR(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_xor(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		VEC_GENERIC_BUILTIN_OPERATION(^, sign, bits, size); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_NOT(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_not(v##sign##int##bits##x##size vec) \\\n"
	"	{ \\\n"
	"		return v##sign##int##bits##x##size##_xor(vec, v##sign##int##bits##x##size##_splat((vec_##sign##int##bits)VEC_MAX_OF_TYPE(vec_uint##bits))); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_CMPLT(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_cmplt(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		VEC_GENERIC_CMP(<, sign, bits, size); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_CMPLE(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_cmple(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		return v##sign##int##bits##x##size##_not(v##sign##int##bits##x##size##_cmpgt(vec1, vec2)); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_CMPEQ(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_cmpeq(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		VEC_GENERIC_CMP(==, sign, bits, size); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_CMPGE(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_cmpge(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		return v##sign##int##bits##x##size##_not(v##sign##int##bits##x##size##_cmplt(vec1, vec2)); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_CMPGT(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_cmpgt(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		VEC_GENERIC_CMP(>, sign, bits, size); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_LSHIFT(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_lshift(v##sign##int##bits##x##size vec1, vuint##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		VEC_GENERIC_OPERATION(vec_##sign##lshift(vec1.generic[i], vec2.generic[i]), sign, bits, size); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_RSHIFT(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_rshift(v##sign##int##bits##x##size vec1, vuint##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		VEC_GENERIC_OPERATION(vec_##sign##rshift(vec1.generic[i], vec2.generic[i]), sign, bits, size); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_LRSHIFT(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_lrshift(v##sign##int##bits##x##size vec1, vuint##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		VEC_GENERIC_OPERATION(vec_urshift((vec_uint##bits)vec1.generic[i], vec2.generic[i]), sign, bits, size); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_MIN(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_min(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		v##sign##int##bits##x##size cmplt = v##sign##int##bits##x##size##_cmplt(vec1, vec2); \\\n"
	"	\\\n"
	"		v##sign##int##bits##x##size a = v##sign##int##bits##x##size##_and(vec1, cmplt); \\\n"
	"		v##sign##int##bits##x##size b = v##sign##int##bits##x##size##_and(vec2, v##sign##int##bits##x##size##_not(cmplt)); \\\n"
	"	\\\n"
	"		return v##sign##int##bits##x##size##_or(a, b); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_MAX(sign, bits, size) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_max(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		v##sign##int##bits##x##size cmplt = v##sign##int##bits##x##size##_cmpgt(vec1, vec2); \\\n"
	"	\\\n"
	"		v##sign##int##bits##x##size a = v##sign##int##bits##x##size##_and(vec1, cmplt); \\\n"
	"		v##sign##int##bits##x##size b = v##sign##int##bits##x##size##_and(vec2, v##sign##int##bits##x##size##_not(cmplt)); \\\n"
	"	\\\n"
	"		return v##sign##int##bits##x##size##_or(a, b); \\\n"
	"	}\n"
	"\n"
	/* "DBL" variants build a vector out of two half-size vectors stored
	 * in generic[0] / generic[1], delegating each op to the half type. */
	"#define VEC_GENERIC_DBL_SPLAT(sign, bits, size, halfsize) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_splat(vec_##sign##int##bits x) \\\n"
	"	{ \\\n"
	"		v##sign##int##bits##x##size vec; \\\n"
	"	\\\n"
	"		vec.generic[0] = v##sign##int##bits##x##halfsize##_splat(x); \\\n"
	"		vec.generic[1] = v##sign##int##bits##x##halfsize##_splat(x); \\\n"
	"	\\\n"
	"		return vec; \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_DBL_LOAD_EX(name, sign, bits, size, halfsize) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_##name(const vec_##sign##int##bits x[size]) \\\n"
	"	{ \\\n"
	"		v##sign##int##bits##x##size vec; \\\n"
	"	\\\n"
	"		vec.generic[0] = v##sign##int##bits##x##halfsize##_##name(x); \\\n"
	"		vec.generic[1] = v##sign##int##bits##x##halfsize##_##name(x + halfsize); \\\n"
	"	\\\n"
	"		return vec; \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_DBL_LOAD(sign, bits, size, halfsize) VEC_GENERIC_DBL_LOAD_EX(load, sign, bits, size, halfsize)\n"
	"#define VEC_GENERIC_DBL_LOAD_ALIGNED(sign, bits, size, halfsize) VEC_GENERIC_DBL_LOAD_EX(load_aligned, sign, bits, size, halfsize)\n"
	"\n"
	"#define VEC_GENERIC_DBL_STORE_EX(name, sign, bits, size, halfsize) \\\n"
	"	VEC_FUNC_IMPL void v##sign##int##bits##x##size##_##name(v##sign##int##bits##x##size vec, vec_##sign##int##bits x[size]) \\\n"
	"	{ \\\n"
	"		v##sign##int##bits##x##halfsize##_##name(vec.generic[0], x); \\\n"
	"		v##sign##int##bits##x##halfsize##_##name(vec.generic[1], x + halfsize); \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_DBL_STORE(sign, bits, size, halfsize) VEC_GENERIC_DBL_STORE_EX(store, sign, bits, size, halfsize)\n"
	"#define VEC_GENERIC_DBL_STORE_ALIGNED(sign, bits, size, halfsize) VEC_GENERIC_DBL_STORE_EX(store_aligned, sign, bits, size, halfsize)\n"
	"\n"
	"#define VEC_GENERIC_DBL_OP(name, sign, bits, size, halfsize, secondsign) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_##name(v##sign##int##bits##x##size vec1, v##secondsign##int##bits##x##size vec2) \\\n"
	"	{ \\\n"
	"		vec1.generic[0] = v##sign##int##bits##x##halfsize##_##name(vec1.generic[0], vec2.generic[0]); \\\n"
	"		vec1.generic[1] = v##sign##int##bits##x##halfsize##_##name(vec1.generic[1], vec2.generic[1]); \\\n"
	"	\\\n"
	"		return vec1; \\\n"
	"	}\n"
	"\n"
	"#define VEC_GENERIC_DBL_ADD(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(add, sign, bits, size, halfsize, sign)\n"
	"#define VEC_GENERIC_DBL_SUB(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(sub, sign, bits, size, halfsize, sign)\n"
	"#define VEC_GENERIC_DBL_MUL(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(mul, sign, bits, size, halfsize, sign)\n"
	"#define VEC_GENERIC_DBL_DIV(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(div, sign, bits, size, halfsize, sign)\n"
	"#define VEC_GENERIC_DBL_AVG(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(avg, sign, bits, size, halfsize, sign)\n"
	"#define VEC_GENERIC_DBL_LSHIFT(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(lshift, sign, bits, size, halfsize, u)\n"
	"#define VEC_GENERIC_DBL_RSHIFT(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(rshift, sign, bits, size, halfsize, u)\n"
	"#define VEC_GENERIC_DBL_LRSHIFT(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(lrshift, sign, bits, size, halfsize, u)\n"
	"#define VEC_GENERIC_DBL_AND(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(and, sign, bits, size, halfsize, sign)\n"
	"#define VEC_GENERIC_DBL_OR(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(or, sign, bits, size, halfsize, sign)\n"
	"#define VEC_GENERIC_DBL_XOR(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(xor, sign, bits, size, halfsize, sign)\n"
	"#define VEC_GENERIC_DBL_MIN(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(min, sign, bits, size, halfsize, sign)\n"
	"#define VEC_GENERIC_DBL_MAX(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(max, sign, bits, size, halfsize, sign)\n"
	"#define VEC_GENERIC_DBL_CMPLT(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(cmplt, sign, bits, size, halfsize, sign)\n"
	"#define VEC_GENERIC_DBL_CMPLE(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(cmple, sign, bits, size, halfsize, sign)\n"
	"#define VEC_GENERIC_DBL_CMPEQ(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(cmpeq, sign, bits, size, halfsize, sign)\n"
	"#define VEC_GENERIC_DBL_CMPGE(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(cmpge, sign, bits, size, halfsize, sign)\n"
	"#define VEC_GENERIC_DBL_CMPGT(sign, bits, size, halfsize) VEC_GENERIC_DBL_OP(cmpgt, sign, bits, size, halfsize, sign)\n"
	"\n"
	"#define VEC_GENERIC_DBL_NOT(sign, bits, size, halfsize) \\\n"
	"	VEC_FUNC_IMPL v##sign##int##bits##x##size v##sign##int##bits##x##size##_not(v##sign##int##bits##x##size vec) \\\n"
	"	{ \\\n"
	"		vec.generic[0] = v##sign##int##bits##x##halfsize##_not(vec.generic[0]); \\\n"
	"		vec.generic[1] = v##sign##int##bits##x##halfsize##_not(vec.generic[1]); \\\n"
	"	\\\n"
	"		return vec; \\\n"
	"	}\n"
	"\n"
	"/* ------------------------------------------------------------------------ */\n"
	"/* PREPROCESSOR HELL INCOMING */\n";
+
+/* Emitted last: closes the include guard opened in `header`. */
+static const char *footer = 
+	"#endif /* VEC_IMPL_GENERIC_H_ */\n";
+
+/* ------------------------------------------------------------------------ */
+
+/* Emit one guarded, loop-based (non-DBL) operation definition to stdout.
+ * The guard macro uses an empty/"U" prefix and the VEC_GENERIC_* macro
+ * argument uses an empty/"u" sign token, depending on signedness. */
+static void print_generic_op(const char *op, int is_signed, int bits, int size)
+{
+	const char *guard_prefix = is_signed ? "" : "U";
+	const char *sign_arg = is_signed ? "/* nothing */" : "u";
+
+	printf("#ifndef V%sINT%dx%d_%s_DEFINED\n", guard_prefix, bits, size, op);
+	printf("VEC_GENERIC_%s(%s, %d, %d)\n", op, sign_arg, bits, size);
+	printf("# define V%sINT%dx%d_%s_DEFINED\n", guard_prefix, bits, size, op);
+	printf("#endif\n");
+}
+
+/* Emit one guarded "DBL" operation definition (a size-N vector built from
+ * two size-N/2 halves) to stdout; trailing blank line separates entries. */
+static void print_generic_dbl_op(const char *op, int is_signed, int bits, int size)
+{
+	const char *guard_prefix = is_signed ? "" : "U";
+	const char *sign_arg = is_signed ? "/* nothing */" : "u";
+
+	printf("#ifndef V%sINT%dx%d_%s_DEFINED\n", guard_prefix, bits, size, op);
+	printf("VEC_GENERIC_DBL_%s(%s, %d, %d, %d)\n", op, sign_arg, bits, size, size / 2);
+	printf("# define V%sINT%dx%d_%s_DEFINED\n", guard_prefix, bits, size, op);
+	printf("#endif\n\n");
+}
+
+typedef void (*print_op_spec)(const char *op, int is_signed, int bits, int size);
+
+/* Emit every supported operation for one vector type (signedness, lane
+ * width, lane count), preceded by a "/" "* vintNxM *" "/" banner comment. */
+static inline void print_ops(int is_signed, int bits, int size, print_op_spec print_op)
+{
+	/* all supported operations here */
+	static const char *ops[] = {
+		"SPLAT",
+		"LOAD_ALIGNED",
+		"LOAD",
+		"STORE_ALIGNED",
+		"STORE",
+		"ADD",
+		"SUB",
+		"MUL",
+		"DIV",
+		"AVG",
+		"AND",
+		"OR",
+		"XOR",
+		"NOT",
+		"CMPLT",
+		"CMPEQ",
+		"CMPGT",
+		"CMPLE", /* these two must be after CMPLT and CMPGT respectively, */
+		"CMPGE", /* because their definitions call those functions */
+		"MIN",
+		"MAX",
+		"RSHIFT",
+		"LRSHIFT",
+		"LSHIFT",
+		NULL,
+	};
+	int i;
+
+	/* BUG FIX: the ternary was inverted ("u" for signed), mislabelling the
+	 * banner; the "u" prefix belongs to the unsigned types, matching the
+	 * (is_signed ? "" : ...) convention used by the print_generic_*_op
+	 * helpers. */
+	printf("\n\n/* v%sint%dx%d */\n\n", (is_signed ? "" : "u"), bits, size);
+
+	for (i = 0; ops[i]; i++)
+		print_op(ops[i], is_signed, bits, size);
+}
+
+/* Generate include/vec/impl/generic.h on stdout: license/macro preamble,
+ * then signed and unsigned definitions for every (bits, size) pair, then
+ * the closing include guard. Size 2 uses the scalar-loop generator; every
+ * larger size is composed from two half-size vectors (DBL generator). */
+int main(void)
+{
+	static struct {
+		int bits, size;
+		print_op_spec print_op;
+	} defs[] = {
+		/* -- 8-bit */
+		{8, 2, print_generic_op},
+		{8, 4, print_generic_dbl_op},
+		{8, 8, print_generic_dbl_op},
+		{8, 16, print_generic_dbl_op},
+		{8, 32, print_generic_dbl_op},
+		{8, 64, print_generic_dbl_op},
+
+		/* -- 16-bit */
+		{16, 2, print_generic_op},
+		{16, 4, print_generic_dbl_op},
+		{16, 8, print_generic_dbl_op},
+		{16, 16, print_generic_dbl_op},
+		{16, 32, print_generic_dbl_op},
+
+		/* -- 32-bit */
+		{32, 2, print_generic_op},
+		{32, 4, print_generic_dbl_op},
+		{32, 8, print_generic_dbl_op},
+		{32, 16, print_generic_dbl_op},
+
+		/* -- 64-bit */
+		{64, 2, print_generic_op},
+		{64, 4, print_generic_dbl_op},
+		{64, 8, print_generic_dbl_op},
+	};
+	size_t i; /* ARRAY_SIZE() yields size_t; avoids signed/unsigned compare */
+
+	puts(header);
+
+	for (i = 0; i < ARRAY_SIZE(defs); i++) {
+		print_ops(1, defs[i].bits, defs[i].size, defs[i].print_op);
+		print_ops(0, defs[i].bits, defs[i].size, defs[i].print_op);
+	}
+
+	puts(footer);
+
+	return 0;
+}