changeset 43:d96806b543ac

altivec: for some reason the entire file got overwritten with zeroes
author Paper <paper@tflc.us>
date Tue, 29 Apr 2025 16:48:58 -0400
parents ca0f0223a62d
children b0a3f0248ecc
files include/vec/impl/ppc/altivec.h
diffstat 1 files changed, 1173 insertions(+), 0 deletions(-)
line diff
--- a/include/vec/impl/ppc/altivec.h	Tue Apr 29 16:36:35 2025 -0400
+++ b/include/vec/impl/ppc/altivec.h	Tue Apr 29 16:48:58 2025 -0400
@@ -0,0 +1,1173 @@
+/**
+ * vec - a tiny SIMD vector library in C99
+ * 
+ * Copyright (c) 2024-2025 Paper
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+**/
+
+/* This file is automatically generated! Do not edit it directly!
+ * Edit the code that generates it in utils/genaltivec.c  --paper */
+
+#ifndef VEC_IMPL_PPC_ALTIVEC_H_
+#define VEC_IMPL_PPC_ALTIVEC_H_
+
+
+
+
+/* vint8x16 */
+
+#if defined(vec_splats) || defined(vec_splat_s8)
+#ifndef VINT8x16_SPLAT_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_splat(vec_int8 x)
+{
+	vint8x16 vec;
+#ifdef vec_splats
+	vec.altivec = vec_splats(x);
+#elif defined(vec_splat_s8)
+	vec.altivec = vec_splat_s8(x);
+#else
+# error logic error
+#endif
+	return vec;
+}
+# define VINT8x16_SPLAT_DEFINED
+#endif
+#endif
+#ifndef VINT8x16_LOAD_ALIGNED_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_load_aligned(const vec_int8 x[16])
+{
+	vint8x16 vec;
+	vec.altivec = vec_ld(0, x);
+	return vec;
+}
+# define VINT8x16_LOAD_ALIGNED_DEFINED
+#endif
+#ifndef VINT8x16_LOAD_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_load(const vec_int8 x[16])
+{
+	vint8x16 vec;
+	vec.altivec = vec_perm(vec_ld(0, x), vec_ld(16, x), vec_lvsl(0, x));
+	return vec;
+}
+# define VINT8x16_LOAD_DEFINED
+#endif
+#ifndef VINT8x16_STORE_ALIGNED_DEFINED
+VEC_FUNC_IMPL void vint8x16_store_aligned(vint8x16 vec, vec_int8 arr[16])
+{
+	vec_st(vec.altivec, 0, arr);
+}
+# define VINT8x16_STORE_ALIGNED_DEFINED
+#endif
+#ifndef VINT8x16_STORE_DEFINED
+VEC_FUNC_IMPL void vint8x16_store(vint8x16 vec, vec_int8 arr[16])
+{
+	memcpy(arr, &vec, sizeof(vec));
+}
+# define VINT8x16_STORE_DEFINED
+#endif
+#ifndef VINT8x16_ADD_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_add(vint8x16 vec1, vint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = (vector signed char)vec_add(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_ADD_DEFINED
+#endif
+#ifndef VINT8x16_SUB_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_sub(vint8x16 vec1, vint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = (vector signed char)vec_sub(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_SUB_DEFINED
+#endif
+#ifdef vec_mul
+#ifndef VINT8x16_MUL_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_mul(vint8x16 vec1, vint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = (vector signed char)vec_mul(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_MUL_DEFINED
+#endif
+#endif
+#ifndef VINT8x16_AND_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_and(vint8x16 vec1, vint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = (vector signed char)vec_and(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_AND_DEFINED
+#endif
+#ifndef VINT8x16_OR_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_or(vint8x16 vec1, vint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = (vector signed char)vec_or(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_OR_DEFINED
+#endif
+#ifndef VINT8x16_XOR_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_xor(vint8x16 vec1, vint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = (vector signed char)vec_xor(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_XOR_DEFINED
+#endif
+#ifndef VINT8x16_CMPLT_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_cmplt(vint8x16 vec1, vint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = (vector signed char)vec_cmplt(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_CMPLT_DEFINED
+#endif
+#ifndef VINT8x16_CMPEQ_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_cmpeq(vint8x16 vec1, vint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = (vector signed char)vec_cmpeq(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_CMPEQ_DEFINED
+#endif
+#ifndef VINT8x16_CMPGT_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_cmpgt(vint8x16 vec1, vint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = (vector signed char)vec_cmpgt(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_CMPGT_DEFINED
+#endif
+#ifndef VINT8x16_MIN_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_min(vint8x16 vec1, vint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = (vector signed char)vec_min(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_MIN_DEFINED
+#endif
+#ifndef VINT8x16_MAX_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_max(vint8x16 vec1, vint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = (vector signed char)vec_max(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_MAX_DEFINED
+#endif
+#ifndef VINT8x16_AVG_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_avg(vint8x16 vec1, vint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = (vector signed char)vec_avg(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_AVG_DEFINED
+#endif
+#ifndef VINT8x16_LSHIFT_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_lshift(vint8x16 vec1, vuint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = (vector signed char)vec_sl(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_LSHIFT_DEFINED
+#endif
+#ifndef VINT8x16_LRSHIFT_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_lrshift(vint8x16 vec1, vuint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = (vector signed char)vec_sr(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_LRSHIFT_DEFINED
+#endif
+#ifndef VINT8x16_RSHIFT_DEFINED
+VEC_FUNC_IMPL vint8x16 vint8x16_rshift(vint8x16 vec1, vuint8x16 vec2)
+{
+	vint8x16 vec;
+	vec.altivec = vec_sra(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT8x16_RSHIFT_DEFINED
+#endif
+
+
+/* vuint8x16 */
+
+#if defined(vec_splats) || defined(vec_splat_u8)
+#ifndef VUINT8x16_SPLAT_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_splat(vec_uint8 x)
+{
+	vuint8x16 vec;
+#ifdef vec_splats
+	vec.altivec = vec_splats(x);
+#elif defined(vec_splat_u8)
+	vec.altivec = vec_splat_u8(x);
+#else
+# error logic error
+#endif
+	return vec;
+}
+# define VUINT8x16_SPLAT_DEFINED
+#endif
+#endif
+#ifndef VUINT8x16_LOAD_ALIGNED_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_load_aligned(const vec_uint8 x[16])
+{
+	vuint8x16 vec;
+	vec.altivec = vec_ld(0, x);
+	return vec;
+}
+# define VUINT8x16_LOAD_ALIGNED_DEFINED
+#endif
+#ifndef VUINT8x16_LOAD_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_load(const vec_uint8 x[16])
+{
+	vuint8x16 vec;
+	vec.altivec = vec_perm(vec_ld(0, x), vec_ld(16, x), vec_lvsl(0, x));
+	return vec;
+}
+# define VUINT8x16_LOAD_DEFINED
+#endif
+#ifndef VUINT8x16_STORE_ALIGNED_DEFINED
+VEC_FUNC_IMPL void vuint8x16_store_aligned(vuint8x16 vec, vec_uint8 arr[16])
+{
+	vec_st(vec.altivec, 0, arr);
+}
+# define VUINT8x16_STORE_ALIGNED_DEFINED
+#endif
+#ifndef VUINT8x16_STORE_DEFINED
+VEC_FUNC_IMPL void vuint8x16_store(vuint8x16 vec, vec_uint8 arr[16])
+{
+	memcpy(arr, &vec, sizeof(vec));
+}
+# define VUINT8x16_STORE_DEFINED
+#endif
+#ifndef VUINT8x16_ADD_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_add(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = (vector unsigned char)vec_add(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_ADD_DEFINED
+#endif
+#ifndef VUINT8x16_SUB_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_sub(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = (vector unsigned char)vec_sub(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_SUB_DEFINED
+#endif
+#ifdef vec_mul
+#ifndef VUINT8x16_MUL_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_mul(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = (vector unsigned char)vec_mul(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_MUL_DEFINED
+#endif
+#endif
+#ifndef VUINT8x16_AND_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_and(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = (vector unsigned char)vec_and(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_AND_DEFINED
+#endif
+#ifndef VUINT8x16_OR_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_or(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = (vector unsigned char)vec_or(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_OR_DEFINED
+#endif
+#ifndef VUINT8x16_XOR_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_xor(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = (vector unsigned char)vec_xor(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_XOR_DEFINED
+#endif
+#ifndef VUINT8x16_CMPLT_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_cmplt(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = (vector unsigned char)vec_cmplt(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_CMPLT_DEFINED
+#endif
+#ifndef VUINT8x16_CMPEQ_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_cmpeq(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = (vector unsigned char)vec_cmpeq(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_CMPEQ_DEFINED
+#endif
+#ifndef VUINT8x16_CMPGT_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_cmpgt(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = (vector unsigned char)vec_cmpgt(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_CMPGT_DEFINED
+#endif
+#ifndef VUINT8x16_MIN_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_min(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = (vector unsigned char)vec_min(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_MIN_DEFINED
+#endif
+#ifndef VUINT8x16_MAX_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_max(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = (vector unsigned char)vec_max(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_MAX_DEFINED
+#endif
+#ifndef VUINT8x16_AVG_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_avg(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = (vector unsigned char)vec_avg(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_AVG_DEFINED
+#endif
+#ifndef VUINT8x16_LSHIFT_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_lshift(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = (vector unsigned char)vec_sl(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_LSHIFT_DEFINED
+#endif
+#ifndef VUINT8x16_LRSHIFT_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_lrshift(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = (vector unsigned char)vec_sr(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_LRSHIFT_DEFINED
+#endif
+#ifndef VUINT8x16_RSHIFT_DEFINED
+VEC_FUNC_IMPL vuint8x16 vuint8x16_rshift(vuint8x16 vec1, vuint8x16 vec2)
+{
+	vuint8x16 vec;
+	vec.altivec = vec_sr(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT8x16_RSHIFT_DEFINED
+#endif
+
+
+/* vint16x8 */
+
+#if defined(vec_splats) || defined(vec_splat_s16)
+#ifndef VINT16x8_SPLAT_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_splat(vec_int16 x)
+{
+	vint16x8 vec;
+#ifdef vec_splats
+	vec.altivec = vec_splats(x);
+#elif defined(vec_splat_s16)
+	vec.altivec = vec_splat_s16(x);
+#else
+# error logic error
+#endif
+	return vec;
+}
+# define VINT16x8_SPLAT_DEFINED
+#endif
+#endif
+#ifndef VINT16x8_LOAD_ALIGNED_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_load_aligned(const vec_int16 x[8])
+{
+	vint16x8 vec;
+	vec.altivec = vec_ld(0, x);
+	return vec;
+}
+# define VINT16x8_LOAD_ALIGNED_DEFINED
+#endif
+#ifndef VINT16x8_LOAD_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_load(const vec_int16 x[8])
+{
+	vint16x8 vec;
+	vec.altivec = vec_perm(vec_ld(0, x), vec_ld(16, x), vec_lvsl(0, x));
+	return vec;
+}
+# define VINT16x8_LOAD_DEFINED
+#endif
+#ifndef VINT16x8_STORE_ALIGNED_DEFINED
+VEC_FUNC_IMPL void vint16x8_store_aligned(vint16x8 vec, vec_int16 arr[8])
+{
+	vec_st(vec.altivec, 0, arr);
+}
+# define VINT16x8_STORE_ALIGNED_DEFINED
+#endif
+#ifndef VINT16x8_STORE_DEFINED
+VEC_FUNC_IMPL void vint16x8_store(vint16x8 vec, vec_int16 arr[8])
+{
+	memcpy(arr, &vec, sizeof(vec));
+}
+# define VINT16x8_STORE_DEFINED
+#endif
+#ifndef VINT16x8_ADD_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_add(vint16x8 vec1, vint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = (vector signed short)vec_add(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_ADD_DEFINED
+#endif
+#ifndef VINT16x8_SUB_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_sub(vint16x8 vec1, vint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = (vector signed short)vec_sub(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_SUB_DEFINED
+#endif
+#ifdef vec_mul
+#ifndef VINT16x8_MUL_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_mul(vint16x8 vec1, vint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = (vector signed short)vec_mul(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_MUL_DEFINED
+#endif
+#endif
+#ifndef VINT16x8_AND_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_and(vint16x8 vec1, vint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = (vector signed short)vec_and(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_AND_DEFINED
+#endif
+#ifndef VINT16x8_OR_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_or(vint16x8 vec1, vint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = (vector signed short)vec_or(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_OR_DEFINED
+#endif
+#ifndef VINT16x8_XOR_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_xor(vint16x8 vec1, vint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = (vector signed short)vec_xor(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_XOR_DEFINED
+#endif
+#ifndef VINT16x8_CMPLT_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_cmplt(vint16x8 vec1, vint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = (vector signed short)vec_cmplt(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_CMPLT_DEFINED
+#endif
+#ifndef VINT16x8_CMPEQ_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_cmpeq(vint16x8 vec1, vint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = (vector signed short)vec_cmpeq(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_CMPEQ_DEFINED
+#endif
+#ifndef VINT16x8_CMPGT_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_cmpgt(vint16x8 vec1, vint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = (vector signed short)vec_cmpgt(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_CMPGT_DEFINED
+#endif
+#ifndef VINT16x8_MIN_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_min(vint16x8 vec1, vint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = (vector signed short)vec_min(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_MIN_DEFINED
+#endif
+#ifndef VINT16x8_MAX_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_max(vint16x8 vec1, vint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = (vector signed short)vec_max(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_MAX_DEFINED
+#endif
+#ifndef VINT16x8_AVG_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_avg(vint16x8 vec1, vint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = (vector signed short)vec_avg(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_AVG_DEFINED
+#endif
+#ifndef VINT16x8_LSHIFT_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_lshift(vint16x8 vec1, vuint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = (vector signed short)vec_sl(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_LSHIFT_DEFINED
+#endif
+#ifndef VINT16x8_LRSHIFT_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_lrshift(vint16x8 vec1, vuint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = (vector signed short)vec_sr(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_LRSHIFT_DEFINED
+#endif
+#ifndef VINT16x8_RSHIFT_DEFINED
+VEC_FUNC_IMPL vint16x8 vint16x8_rshift(vint16x8 vec1, vuint16x8 vec2)
+{
+	vint16x8 vec;
+	vec.altivec = vec_sra(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT16x8_RSHIFT_DEFINED
+#endif
+
+
+/* vuint16x8 */
+
+#if defined(vec_splats) || defined(vec_splat_u16)
+#ifndef VUINT16x8_SPLAT_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_splat(vec_uint16 x)
+{
+	vuint16x8 vec;
+#ifdef vec_splats
+	vec.altivec = vec_splats(x);
+#elif defined(vec_splat_u16)
+	vec.altivec = vec_splat_u16(x);
+#else
+# error logic error
+#endif
+	return vec;
+}
+# define VUINT16x8_SPLAT_DEFINED
+#endif
+#endif
+#ifndef VUINT16x8_LOAD_ALIGNED_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_load_aligned(const vec_uint16 x[8])
+{
+	vuint16x8 vec;
+	vec.altivec = vec_ld(0, x);
+	return vec;
+}
+# define VUINT16x8_LOAD_ALIGNED_DEFINED
+#endif
+#ifndef VUINT16x8_LOAD_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_load(const vec_uint16 x[8])
+{
+	vuint16x8 vec;
+	vec.altivec = vec_perm(vec_ld(0, x), vec_ld(16, x), vec_lvsl(0, x));
+	return vec;
+}
+# define VUINT16x8_LOAD_DEFINED
+#endif
+#ifndef VUINT16x8_STORE_ALIGNED_DEFINED
+VEC_FUNC_IMPL void vuint16x8_store_aligned(vuint16x8 vec, vec_uint16 arr[8])
+{
+	vec_st(vec.altivec, 0, arr);
+}
+# define VUINT16x8_STORE_ALIGNED_DEFINED
+#endif
+#ifndef VUINT16x8_STORE_DEFINED
+VEC_FUNC_IMPL void vuint16x8_store(vuint16x8 vec, vec_uint16 arr[8])
+{
+	memcpy(arr, &vec, sizeof(vec));
+}
+# define VUINT16x8_STORE_DEFINED
+#endif
+#ifndef VUINT16x8_ADD_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_add(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = (vector unsigned short)vec_add(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_ADD_DEFINED
+#endif
+#ifndef VUINT16x8_SUB_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_sub(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = (vector unsigned short)vec_sub(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_SUB_DEFINED
+#endif
+#ifdef vec_mul
+#ifndef VUINT16x8_MUL_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_mul(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = (vector unsigned short)vec_mul(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_MUL_DEFINED
+#endif
+#endif
+#ifndef VUINT16x8_AND_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_and(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = (vector unsigned short)vec_and(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_AND_DEFINED
+#endif
+#ifndef VUINT16x8_OR_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_or(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = (vector unsigned short)vec_or(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_OR_DEFINED
+#endif
+#ifndef VUINT16x8_XOR_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_xor(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = (vector unsigned short)vec_xor(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_XOR_DEFINED
+#endif
+#ifndef VUINT16x8_CMPLT_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_cmplt(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = (vector unsigned short)vec_cmplt(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_CMPLT_DEFINED
+#endif
+#ifndef VUINT16x8_CMPEQ_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_cmpeq(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = (vector unsigned short)vec_cmpeq(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_CMPEQ_DEFINED
+#endif
+#ifndef VUINT16x8_CMPGT_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_cmpgt(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = (vector unsigned short)vec_cmpgt(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_CMPGT_DEFINED
+#endif
+#ifndef VUINT16x8_MIN_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_min(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = (vector unsigned short)vec_min(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_MIN_DEFINED
+#endif
+#ifndef VUINT16x8_MAX_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_max(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = (vector unsigned short)vec_max(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_MAX_DEFINED
+#endif
+#ifndef VUINT16x8_AVG_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_avg(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = (vector unsigned short)vec_avg(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_AVG_DEFINED
+#endif
+#ifndef VUINT16x8_LSHIFT_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_lshift(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = (vector unsigned short)vec_sl(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_LSHIFT_DEFINED
+#endif
+#ifndef VUINT16x8_LRSHIFT_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_lrshift(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = (vector unsigned short)vec_sr(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_LRSHIFT_DEFINED
+#endif
+#ifndef VUINT16x8_RSHIFT_DEFINED
+VEC_FUNC_IMPL vuint16x8 vuint16x8_rshift(vuint16x8 vec1, vuint16x8 vec2)
+{
+	vuint16x8 vec;
+	vec.altivec = vec_sr(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT16x8_RSHIFT_DEFINED
+#endif
+
+
+/* vint32x4 */
+
+#if defined(vec_splats) || defined(vec_splat_s32)
+#ifndef VINT32x4_SPLAT_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_splat(vec_int32 x)
+{
+	vint32x4 vec;
+#ifdef vec_splats
+	vec.altivec = vec_splats(x);
+#elif defined(vec_splat_s32)
+	vec.altivec = vec_splat_s32(x);
+#else
+# error logic error
+#endif
+	return vec;
+}
+# define VINT32x4_SPLAT_DEFINED
+#endif
+#endif
+#ifndef VINT32x4_LOAD_ALIGNED_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_load_aligned(const vec_int32 x[4])
+{
+	vint32x4 vec;
+	vec.altivec = vec_ld(0, x);
+	return vec;
+}
+# define VINT32x4_LOAD_ALIGNED_DEFINED
+#endif
+#ifndef VINT32x4_LOAD_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_load(const vec_int32 x[4])
+{
+	vint32x4 vec;
+	vec.altivec = vec_perm(vec_ld(0, x), vec_ld(16, x), vec_lvsl(0, x));
+	return vec;
+}
+# define VINT32x4_LOAD_DEFINED
+#endif
+#ifndef VINT32x4_STORE_ALIGNED_DEFINED
+VEC_FUNC_IMPL void vint32x4_store_aligned(vint32x4 vec, vec_int32 arr[4])
+{
+	vec_st(vec.altivec, 0, arr);
+}
+# define VINT32x4_STORE_ALIGNED_DEFINED
+#endif
+#ifndef VINT32x4_STORE_DEFINED
+VEC_FUNC_IMPL void vint32x4_store(vint32x4 vec, vec_int32 arr[4])
+{
+	memcpy(arr, &vec, sizeof(vec));
+}
+# define VINT32x4_STORE_DEFINED
+#endif
+#ifndef VINT32x4_ADD_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_add(vint32x4 vec1, vint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = (vector signed int)vec_add(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_ADD_DEFINED
+#endif
+#ifndef VINT32x4_SUB_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_sub(vint32x4 vec1, vint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = (vector signed int)vec_sub(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_SUB_DEFINED
+#endif
+#ifdef vec_mul
+#ifndef VINT32x4_MUL_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_mul(vint32x4 vec1, vint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = (vector signed int)vec_mul(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_MUL_DEFINED
+#endif
+#endif
+#ifndef VINT32x4_AND_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_and(vint32x4 vec1, vint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = (vector signed int)vec_and(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_AND_DEFINED
+#endif
+#ifndef VINT32x4_OR_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_or(vint32x4 vec1, vint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = (vector signed int)vec_or(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_OR_DEFINED
+#endif
+#ifndef VINT32x4_XOR_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_xor(vint32x4 vec1, vint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = (vector signed int)vec_xor(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_XOR_DEFINED
+#endif
+#ifndef VINT32x4_CMPLT_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_cmplt(vint32x4 vec1, vint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = (vector signed int)vec_cmplt(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_CMPLT_DEFINED
+#endif
+#ifndef VINT32x4_CMPEQ_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_cmpeq(vint32x4 vec1, vint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = (vector signed int)vec_cmpeq(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_CMPEQ_DEFINED
+#endif
+#ifndef VINT32x4_CMPGT_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_cmpgt(vint32x4 vec1, vint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = (vector signed int)vec_cmpgt(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_CMPGT_DEFINED
+#endif
+#ifndef VINT32x4_MIN_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_min(vint32x4 vec1, vint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = (vector signed int)vec_min(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_MIN_DEFINED
+#endif
+#ifndef VINT32x4_MAX_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_max(vint32x4 vec1, vint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = (vector signed int)vec_max(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_MAX_DEFINED
+#endif
+#ifndef VINT32x4_AVG_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_avg(vint32x4 vec1, vint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = (vector signed int)vec_avg(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_AVG_DEFINED
+#endif
+#ifndef VINT32x4_LSHIFT_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_lshift(vint32x4 vec1, vuint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = (vector signed int)vec_sl(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_LSHIFT_DEFINED
+#endif
+#ifndef VINT32x4_LRSHIFT_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_lrshift(vint32x4 vec1, vuint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = (vector signed int)vec_sr(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_LRSHIFT_DEFINED
+#endif
+#ifndef VINT32x4_RSHIFT_DEFINED
+VEC_FUNC_IMPL vint32x4 vint32x4_rshift(vint32x4 vec1, vuint32x4 vec2)
+{
+	vint32x4 vec;
+	vec.altivec = vec_sra(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VINT32x4_RSHIFT_DEFINED
+#endif
+
+
+/* vuint32x4 */
+
+#if defined(vec_splats) || defined(vec_splat_u32)
+#ifndef VUINT32x4_SPLAT_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_splat(vec_uint32 x)
+{
+	vuint32x4 vec;
+#ifdef vec_splats
+	vec.altivec = vec_splats(x);
+#elif defined(vec_splat_u32)
+	vec.altivec = vec_splat_u32(x);
+#else
+# error logic error
+#endif
+	return vec;
+}
+# define VUINT32x4_SPLAT_DEFINED
+#endif
+#endif
+#ifndef VUINT32x4_LOAD_ALIGNED_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_load_aligned(const vec_uint32 x[4])
+{
+	vuint32x4 vec;
+	vec.altivec = vec_ld(0, x);
+	return vec;
+}
+# define VUINT32x4_LOAD_ALIGNED_DEFINED
+#endif
+#ifndef VUINT32x4_LOAD_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_load(const vec_uint32 x[4])
+{
+	vuint32x4 vec;
+	vec.altivec = vec_perm(vec_ld(0, x), vec_ld(16, x), vec_lvsl(0, x));
+	return vec;
+}
+# define VUINT32x4_LOAD_DEFINED
+#endif
+#ifndef VUINT32x4_STORE_ALIGNED_DEFINED
+VEC_FUNC_IMPL void vuint32x4_store_aligned(vuint32x4 vec, vec_uint32 arr[4])
+{
+	vec_st(vec.altivec, 0, arr);
+}
+# define VUINT32x4_STORE_ALIGNED_DEFINED
+#endif
+#ifndef VUINT32x4_STORE_DEFINED
+VEC_FUNC_IMPL void vuint32x4_store(vuint32x4 vec, vec_uint32 arr[4])
+{
+	memcpy(arr, &vec, sizeof(vec));
+}
+# define VUINT32x4_STORE_DEFINED
+#endif
+#ifndef VUINT32x4_ADD_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_add(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = (vector unsigned int)vec_add(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_ADD_DEFINED
+#endif
+#ifndef VUINT32x4_SUB_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_sub(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = (vector unsigned int)vec_sub(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_SUB_DEFINED
+#endif
+#ifdef vec_mul
+#ifndef VUINT32x4_MUL_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_mul(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = (vector unsigned int)vec_mul(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_MUL_DEFINED
+#endif
+#endif
+#ifndef VUINT32x4_AND_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_and(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = (vector unsigned int)vec_and(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_AND_DEFINED
+#endif
+#ifndef VUINT32x4_OR_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_or(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = (vector unsigned int)vec_or(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_OR_DEFINED
+#endif
+#ifndef VUINT32x4_XOR_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_xor(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = (vector unsigned int)vec_xor(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_XOR_DEFINED
+#endif
+#ifndef VUINT32x4_CMPLT_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_cmplt(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = (vector unsigned int)vec_cmplt(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_CMPLT_DEFINED
+#endif
+#ifndef VUINT32x4_CMPEQ_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_cmpeq(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = (vector unsigned int)vec_cmpeq(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_CMPEQ_DEFINED
+#endif
+#ifndef VUINT32x4_CMPGT_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_cmpgt(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = (vector unsigned int)vec_cmpgt(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_CMPGT_DEFINED
+#endif
+#ifndef VUINT32x4_MIN_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_min(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = (vector unsigned int)vec_min(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_MIN_DEFINED
+#endif
+#ifndef VUINT32x4_MAX_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_max(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = (vector unsigned int)vec_max(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_MAX_DEFINED
+#endif
+#ifndef VUINT32x4_AVG_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_avg(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = (vector unsigned int)vec_avg(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_AVG_DEFINED
+#endif
+#ifndef VUINT32x4_LSHIFT_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_lshift(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = (vector unsigned int)vec_sl(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_LSHIFT_DEFINED
+#endif
+#ifndef VUINT32x4_LRSHIFT_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_lrshift(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = (vector unsigned int)vec_sr(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_LRSHIFT_DEFINED
+#endif
+#ifndef VUINT32x4_RSHIFT_DEFINED
+VEC_FUNC_IMPL vuint32x4 vuint32x4_rshift(vuint32x4 vec1, vuint32x4 vec2)
+{
+	vuint32x4 vec;
+	vec.altivec = vec_sr(vec1.altivec, vec2.altivec);
+	return vec;
+}
+# define VUINT32x4_RSHIFT_DEFINED
+#endif
+#endif /* VEC_IMPL_PPC_ALTIVEC_H_ */
+
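
As a quick sanity check of the restored header, here is a minimal usage sketch. It assumes the vec core headers supply the vint8x16 type and the vec_int8 typedef and that these implementation functions can be called directly; the include path and the add_bytes helper are illustrative assumptions, not part of this changeset.

	#include "vec/vec.h"  /* hypothetical umbrella header; exact include path is an assumption */

	/* Element-wise addition of two arrays of 16 signed bytes using the
	 * AltiVec-backed primitives from altivec.h. The unaligned load/store
	 * variants are used, so the arrays need no 16-byte alignment. */
	void add_bytes(const vec_int8 a[16], const vec_int8 b[16], vec_int8 out[16])
	{
		vint8x16 va  = vint8x16_load(a);      /* vec_perm-based unaligned load */
		vint8x16 vb  = vint8x16_load(b);
		vint8x16 sum = vint8x16_add(va, vb);  /* wraps vec_add */
		vint8x16_store(sum, out);             /* memcpy-based unaligned store */
	}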