comparison src/impl/x86/mmx.c @ 23:e26874655738

*: huge refactor, new major release (hahaha). I keep finding things that are broken... The problem this time was that vec would unintentionally build some functions with extended instruction sets enabled, which is Bad and meant that, for all intents and purposes, the CPU detection was completely broken. Now vec is no longer header-only either. Boohoo. However, this gives vec a lot more flexibility, since we no longer want or need to care about C++ crap. The NEON and Altivec implementations have not been updated, which means they won't compile, which is why they're commented out in the CMake build file. (A sketch of the resulting runtime-dispatch setup follows the changeset header below.)
author Paper <paper@tflc.us>
date Sun, 24 Nov 2024 02:52:40 -0500
parents 22:fbcd3fa6f8fc
children e49e70f7012f
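The refactor described above implies that each ISA-specific file (such as this MMX one) is built as its own translation unit with the matching compiler flags, and that the exported *_impl_mmx operation tables are only selected at runtime once CPU detection confirms the instruction set is available. Below is a minimal sketch of that selection, assuming a detection helper named vec_cpu_has_mmx() and a generic fallback table named vint8x8_impl_generic; neither of those names appears in this file, only vint8x8_impl_mmx is real.

    /* hypothetical runtime dispatch; only vint8x8_impl_mmx is defined in this file */
    const vint8x8_impl *vint8x8_pick_impl(void)
    {
        return vec_cpu_has_mmx() ? &vint8x8_impl_mmx : &vint8x8_impl_generic;
    }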
/**
 * vec - a tiny SIMD vector library in C99
 *
 * Copyright (c) 2024 Paper
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 **/

#include "vec/vec.h"
#include "vec/impl/x86/mmx.h"
#include "vec/impl/generic.h"

#include <mmintrin.h>
#include <string.h>

#define VEC_MMX_OPERATION_8x8(op, sign) \
	do { \
		/* unpack and multiply */ \
		union v##sign##int8x8_impl_data *vec1d = (union v##sign##int8x8_impl_data *)&vec1; \
		union v##sign##int8x8_impl_data *vec2d = (union v##sign##int8x8_impl_data *)&vec2; \
	\
		__m64 dst_even = _mm_##op##_pi16(vec1d->mmx, vec2d->mmx); \
		__m64 dst_odd = _mm_##op##_pi16(_mm_srli_pi16(vec1d->mmx, 8), _mm_srli_pi16(vec2d->mmx, 8)); \
	\
		/* repack */ \
		vec1d->mmx = _mm_or_si64( \
			_mm_slli_pi16(dst_odd, 8), \
			_mm_srli_pi16(_mm_slli_pi16(dst_even, 8), 8) \
		); \
		return vec1d->vec; \
	} while (0)
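
/*
 * MMX has no 8-bit multiply, so VEC_MMX_OPERATION_8x8 (used with op = mullo)
 * performs two 16-bit multiplies: one on the raw lanes, whose low bytes hold
 * the even-indexed elements, and one on the lanes logically shifted right by
 * 8, which isolates the odd-indexed elements. It then keeps the low byte of
 * each 16-bit product and interleaves the two halves back together. Per byte
 * lane this computes the following (a, b and out stand for the elements of
 * vec1, vec2 and the returned vector):
 *
 *     for (int i = 0; i < 8; i++)
 *         out[i] = (uint8_t)(a[i] * b[i]);  // low 8 bits of the product;
 *                                           // same bit pattern signed or unsigned
 */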

// shared between MMX variations
#define VEC_MMX_MUL_8x8(sign) \
	VEC_MMX_OPERATION_8x8(mullo, sign)

/* pmullw (_mm_mullo_pi16) keeps only the low 16 bits of each product,
 * which are the same bits for signed and unsigned inputs. */
#define VEC_MMX_MUL_16x4(sign) \
	do { \
		union v##sign##int16x4_impl_data *vec1d = (union v##sign##int16x4_impl_data *)&vec1; \
		union vuint16x4_impl_data *vec2d = (union vuint16x4_impl_data *)&vec2; \
	\
		vec1d->mmx = _mm_mullo_pi16(vec1d->mmx, vec2d->mmx); \
		return vec1d->vec; \
	} while (0)

#define VEC_MMX_MUL_32x2(sign) \
	/* TODO implement this for real */ \
	do { \
		return v##sign##int32x2_generic_mul(vec1, vec2); \
	} while (0)
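
/*
 * Sketch for the TODO above (not implemented in this changeset): a 32-bit
 * product modulo 2^32 can be assembled from 16-bit halves, since with
 * a = ah*2^16 + al and b = bh*2^16 + bl:
 *
 *     a * b == al*bl + ((al*bh + ah*bl) << 16)   (mod 2^32)
 *
 * Base MMX only offers 16-bit multiplies, and only a signed high half
 * (_mm_mulhi_pi16), which makes the full al*bl term awkward to form, so the
 * scalar generic fallback is used here for now.
 */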

#define VEC_MMX_DEFINE_OPERATIONS_SIGN(sign, bits, size) \
	union v##sign##int##bits##x##size##_impl_data { \
		v##sign##int##bits##x##size vec; \
		__m64 mmx; \
	}; \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_mmx_load_aligned(const vec_##sign##int##bits in[size]) \
	{ \
		v##sign##int##bits##x##size vec; \
		memcpy(&vec, in, sizeof(vec)); \
		return vec; \
	} \
	\
	static void v##sign##int##bits##x##size##_mmx_store_aligned(v##sign##int##bits##x##size vec, vec_##sign##int##bits out[size]) \
	{ \
		memcpy(out, &vec, sizeof(vec)); \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_mmx_add(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->mmx = _mm_add_pi##bits(vec1d->mmx, vec2d->mmx); \
	\
		return vec1d->vec; \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_mmx_sub(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->mmx = _mm_sub_pi##bits(vec1d->mmx, vec2d->mmx); \
	\
		return vec1d->vec; \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_mmx_mul(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		VEC_MMX_MUL_##bits##x##size(sign); \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_mmx_and(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->mmx = _mm_and_si64(vec1d->mmx, vec2d->mmx); \
	\
		return vec1d->vec; \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_mmx_or(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->mmx = _mm_or_si64(vec1d->mmx, vec2d->mmx); \
	\
		return vec1d->vec; \
	} \
	\
	static v##sign##int##bits##x##size v##sign##int##bits##x##size##_mmx_xor(v##sign##int##bits##x##size vec1, v##sign##int##bits##x##size vec2) \
	{ \
		union v##sign##int##bits##x##size##_impl_data *vec1d = (union v##sign##int##bits##x##size##_impl_data *)&vec1; \
		union v##sign##int##bits##x##size##_impl_data *vec2d = (union v##sign##int##bits##x##size##_impl_data *)&vec2; \
	\
		vec1d->mmx = _mm_xor_si64(vec1d->mmx, vec2d->mmx); \
	\
		return vec1d->vec; \
	} \
	\
	const v##sign##int##bits##x##size##_impl v##sign##int##bits##x##size##_impl_mmx = { \
		v##sign##int##bits##x##size##_generic_splat, \
		v##sign##int##bits##x##size##_mmx_load_aligned, \
		v##sign##int##bits##x##size##_mmx_load_aligned, \
		v##sign##int##bits##x##size##_mmx_store_aligned, \
		v##sign##int##bits##x##size##_mmx_store_aligned, \
		v##sign##int##bits##x##size##_mmx_add, \
		v##sign##int##bits##x##size##_mmx_sub, \
		v##sign##int##bits##x##size##_mmx_mul, \
		v##sign##int##bits##x##size##_generic_div, \
		v##sign##int##bits##x##size##_generic_avg, \
		v##sign##int##bits##x##size##_mmx_and, \
		v##sign##int##bits##x##size##_mmx_or, \
		v##sign##int##bits##x##size##_mmx_xor, \
		v##sign##int##bits##x##size##_generic_not, \
		v##sign##int##bits##x##size##_generic_lshift, \
		v##sign##int##bits##x##size##_generic_rshift, \
		v##sign##int##bits##x##size##_generic_lrshift, \
		v##sign##int##bits##x##size##_generic_cmplt, \
		v##sign##int##bits##x##size##_generic_cmple, \
		v##sign##int##bits##x##size##_generic_cmpeq, \
		v##sign##int##bits##x##size##_generic_cmpge, \
		v##sign##int##bits##x##size##_generic_cmpgt, \
	};

#define VEC_MMX_DEFINE_OPERATIONS(bits, size) \
	VEC_MMX_DEFINE_OPERATIONS_SIGN(u, bits, size) \
	VEC_MMX_DEFINE_OPERATIONS_SIGN( , bits, size)

VEC_MMX_DEFINE_OPERATIONS(8, 8)
VEC_MMX_DEFINE_OPERATIONS(16, 4)
VEC_MMX_DEFINE_OPERATIONS(32, 2)
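
/*
 * Each VEC_MMX_DEFINE_OPERATIONS(bits, size) call above expands the template
 * twice, once with sign = u and once with an empty sign. For (8, 8) that
 * produces, among other things:
 *
 *     union vuint8x8_impl_data { vuint8x8 vec; __m64 mmx; };
 *     static vuint8x8 vuint8x8_mmx_add(vuint8x8 vec1, vuint8x8 vec2);
 *     ...
 *     const vuint8x8_impl vuint8x8_impl_mmx = { ... };
 *
 * and likewise vint8x8_impl_mmx for the signed variant. The const *_impl_mmx
 * tables are the only symbols with external linkage; every helper function is
 * static to this translation unit.
 */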