r347556 - [PowerPC] Vector load/store builtins overstate alignment of pointers
Nemanja Ivanovic via cfe-commits
cfe-commits at lists.llvm.org
Mon Nov 26 06:35:38 PST 2018
Author: nemanjai
Date: Mon Nov 26 06:35:38 2018
New Revision: 347556
URL: http://llvm.org/viewvc/llvm-project?rev=347556&view=rev
Log:
[PowerPC] Vector load/store builtins overstate alignment of pointers
A number of builtins in altivec.h load/store vectors from pointers to scalar
types. Currently they simply cast the pointer to a vector pointer, but such an
expression carries the alignment of the target (vector) type. The input pointer
need not have that alignment, so these builtins trigger UBSan (and rightly so).
This resolves https://bugs.llvm.org/show_bug.cgi?id=39704
Differential revision: https://reviews.llvm.org/D54787
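For readers unfamiliar with the underlying issue, here is a minimal sketch of the pattern this patch changes, assuming Clang with AltiVec support (-maltivec) targeting PowerPC; the function names and the unaligned_vsi typedef below are illustrative only, not part of altivec.h:

/* demo.c -- compile with e.g.:
   clang -target powerpc64le-linux-gnu -maltivec -S -emit-llvm demo.c */
#include <altivec.h>

/* Casting a scalar pointer to "vector signed int *" makes the dereference
   assume 16-byte alignment, so the emitted IR load is "align 16" even when
   the pointer is only int-aligned -- undefined behaviour, and exactly what
   UBSan reports. */
vector signed int load_overaligned(signed int *p) {
  return *(vector signed int *)p;
}

/* The approach taken in this commit: a vector typedef whose alignment is
   lowered to 1. Dereferencing through it emits an "align 1" load, which is
   valid for any pointer value. */
typedef vector signed int unaligned_vsi __attribute__((aligned(1)));

vector signed int load_unaligned(signed int *p) {
  return *(unaligned_vsi *)p;
}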
Modified:
cfe/trunk/lib/Headers/altivec.h
cfe/trunk/test/CodeGen/builtins-ppc-altivec.c
cfe/trunk/test/CodeGen/builtins-ppc-quadword.c
cfe/trunk/test/CodeGen/builtins-ppc-vsx.c
Modified: cfe/trunk/lib/Headers/altivec.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/altivec.h?rev=347556&r1=347555&r2=347556&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/altivec.h (original)
+++ cfe/trunk/lib/Headers/altivec.h Mon Nov 26 06:35:38 2018
@@ -16355,67 +16355,82 @@ vec_revb(vector unsigned __int128 __a) {
/* vec_xl */
+typedef vector signed char unaligned_vec_schar __attribute__((aligned(1)));
+typedef vector unsigned char unaligned_vec_uchar __attribute__((aligned(1)));
+typedef vector signed short unaligned_vec_sshort __attribute__((aligned(1)));
+typedef vector unsigned short unaligned_vec_ushort __attribute__((aligned(1)));
+typedef vector signed int unaligned_vec_sint __attribute__((aligned(1)));
+typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
+typedef vector float unaligned_vec_float __attribute__((aligned(1)));
+
static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
signed char *__ptr) {
- return *(vector signed char *)(__ptr + __offset);
+ return *(unaligned_vec_schar *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned char
vec_xl(signed long long __offset, unsigned char *__ptr) {
- return *(vector unsigned char *)(__ptr + __offset);
+ return *(unaligned_vec_uchar*)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
signed short *__ptr) {
- return *(vector signed short *)(__ptr + __offset);
+ return *(unaligned_vec_sshort *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned short
vec_xl(signed long long __offset, unsigned short *__ptr) {
- return *(vector unsigned short *)(__ptr + __offset);
+ return *(unaligned_vec_ushort *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
signed int *__ptr) {
- return *(vector signed int *)(__ptr + __offset);
+ return *(unaligned_vec_sint *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
unsigned int *__ptr) {
- return *(vector unsigned int *)(__ptr + __offset);
+ return *(unaligned_vec_uint *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
float *__ptr) {
- return *(vector float *)(__ptr + __offset);
+ return *(unaligned_vec_float *)(__ptr + __offset);
}
#ifdef __VSX__
+typedef vector signed long long unaligned_vec_sll __attribute__((aligned(1)));
+typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
+typedef vector double unaligned_vec_double __attribute__((aligned(1)));
+
static inline __ATTRS_o_ai vector signed long long
vec_xl(signed long long __offset, signed long long *__ptr) {
- return *(vector signed long long *)(__ptr + __offset);
+ return *(unaligned_vec_sll *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned long long
vec_xl(signed long long __offset, unsigned long long *__ptr) {
- return *(vector unsigned long long *)(__ptr + __offset);
+ return *(unaligned_vec_ull *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
double *__ptr) {
- return *(vector double *)(__ptr + __offset);
+ return *(unaligned_vec_double *)(__ptr + __offset);
}
#endif
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
+typedef vector unsigned __int128 unaligned_vec_ui128
+ __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed __int128
vec_xl(signed long long __offset, signed __int128 *__ptr) {
- return *(vector signed __int128 *)(__ptr + __offset);
+ return *(unaligned_vec_si128 *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned __int128
vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
- return *(vector unsigned __int128 *)(__ptr + __offset);
+ return *(unaligned_vec_ui128 *)(__ptr + __offset);
}
#endif
@@ -16500,62 +16515,62 @@ vec_xl_be(signed long long __offset, un
static inline __ATTRS_o_ai void vec_xst(vector signed char __vec,
signed long long __offset,
signed char *__ptr) {
- *(vector signed char *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_schar *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned char __vec,
signed long long __offset,
unsigned char *__ptr) {
- *(vector unsigned char *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_uchar *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector signed short __vec,
signed long long __offset,
signed short *__ptr) {
- *(vector signed short *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_sshort *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
signed long long __offset,
unsigned short *__ptr) {
- *(vector unsigned short *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_ushort *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
signed long long __offset,
signed int *__ptr) {
- *(vector signed int *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_sint *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec,
signed long long __offset,
unsigned int *__ptr) {
- *(vector unsigned int *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_uint *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector float __vec,
signed long long __offset,
float *__ptr) {
- *(vector float *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_float *)(__ptr + __offset) = __vec;
}
#ifdef __VSX__
static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
signed long long __offset,
signed long long *__ptr) {
- *(vector signed long long *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_sll *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
signed long long __offset,
unsigned long long *__ptr) {
- *(vector unsigned long long *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_ull *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector double __vec,
signed long long __offset,
double *__ptr) {
- *(vector double *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_double *)(__ptr + __offset) = __vec;
}
#endif
@@ -16563,13 +16578,13 @@ static inline __ATTRS_o_ai void vec_xst(
static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
signed long long __offset,
signed __int128 *__ptr) {
- *(vector signed __int128 *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_si128 *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
signed long long __offset,
unsigned __int128 *__ptr) {
- *(vector unsigned __int128 *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_ui128 *)(__ptr + __offset) = __vec;
}
#endif
Modified: cfe/trunk/test/CodeGen/builtins-ppc-altivec.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/builtins-ppc-altivec.c?rev=347556&r1=347555&r2=347556&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/builtins-ppc-altivec.c (original)
+++ cfe/trunk/test/CodeGen/builtins-ppc-altivec.c Mon Nov 26 06:35:38 2018
@@ -9362,32 +9362,32 @@ void test9() {
// CHECK-LABEL: define void @test9
// CHECK-LE-LABEL: define void @test9
res_vsc = vec_xl(param_sll, &param_sc);
- // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
res_vuc = vec_xl(param_sll, &param_uc);
- // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
res_vs = vec_xl(param_sll, &param_s);
- // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
res_vus = vec_xl(param_sll, &param_us);
- // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
res_vi = vec_xl(param_sll, &param_i);
- // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
res_vui = vec_xl(param_sll, &param_ui);
- // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
res_vf = vec_xl(param_sll, &param_f);
- // CHECK: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 16
+ // CHECK: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 1
}
/* ------------------------------ vec_xst ----------------------------------- */
@@ -9395,32 +9395,32 @@ void test10() {
// CHECK-LABEL: define void @test10
// CHECK-LE-LABEL: define void @test10
vec_xst(vsc, param_sll, &param_sc);
- // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 1
vec_xst(vuc, param_sll, &param_uc);
- // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 1
vec_xst(vs, param_sll, &param_s);
- // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 1
vec_xst(vus, param_sll, &param_us);
- // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 1
vec_xst(vi, param_sll, &param_i);
- // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 1
vec_xst(vui, param_sll, &param_ui);
- // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 1
vec_xst(vf, param_sll, &param_f);
- // CHECK: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 16
+ // CHECK: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 1
}
/* ----------------------------- vec_xl_be ---------------------------------- */
@@ -9428,35 +9428,35 @@ void test11() {
// CHECK-LABEL: define void @test11
// CHECK-LE-LABEL: define void @test11
res_vsc = vec_xl_be(param_sll, &param_sc);
- // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
// CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
res_vuc = vec_xl_be(param_sll, &param_uc);
- // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
// CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
res_vs = vec_xl_be(param_sll, &param_s);
- // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
// CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
res_vus = vec_xl_be(param_sll, &param_us);
- // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
// CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
res_vi = vec_xl_be(param_sll, &param_i);
- // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}})
res_vui = vec_xl_be(param_sll, &param_ui);
- // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 1
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}})
res_vf = vec_xl_be(param_sll, &param_f);
- // CHECK: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 16
+ // CHECK: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 1
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}})
}
@@ -9465,34 +9465,34 @@ void test12() {
// CHECK-LABEL: define void @test12
// CHECK-LE-LABEL: define void @test12
vec_xst_be(vsc, param_sll, &param_sc);
- // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 1
// CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
// CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vuc, param_sll, &param_uc);
- // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
+ // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 1
// CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
// CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vs, param_sll, &param_s);
- // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 1
// CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vus, param_sll, &param_us);
- // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
+ // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 1
// CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vi, param_sll, &param_i);
- // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 1
// CHECK-LE: call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vui, param_sll, &param_ui);
- // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
+ // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 1
// CHECK-LE: call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vf, param_sll, &param_f);
- // CHECK: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 16
+ // CHECK: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 1
// CHECK-LE: call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %{{[0-9]+}}, i8* %{{[0-9]+}})
}
Modified: cfe/trunk/test/CodeGen/builtins-ppc-quadword.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/builtins-ppc-quadword.c?rev=347556&r1=347555&r2=347556&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/builtins-ppc-quadword.c (original)
+++ cfe/trunk/test/CodeGen/builtins-ppc-quadword.c Mon Nov 26 06:35:38 2018
@@ -205,45 +205,45 @@ void test1() {
/* vec_xl */
res_vlll = vec_xl(param_sll, &param_lll);
- // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xl' is ambiguous
res_vulll = vec_xl(param_sll, &param_ulll);
- // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xl' is ambiguous
/* vec_xst */
vec_xst(vlll, param_sll, &param_lll);
- // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xst' is ambiguous
vec_xst(vulll, param_sll, &param_ulll);
- // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xst' is ambiguous
/* vec_xl_be */
res_vlll = vec_xl_be(param_sll, &param_lll);
- // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xl' is ambiguous
res_vulll = vec_xl_be(param_sll, &param_ulll);
- // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xl' is ambiguous
/* vec_xst_be */
vec_xst_be(vlll, param_sll, &param_lll);
- // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xst' is ambiguous
vec_xst_be(vulll, param_sll, &param_ulll);
- // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
- // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
+ // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
+ // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 1
// CHECK-PPC: error: call to 'vec_xst' is ambiguous
}
Modified: cfe/trunk/test/CodeGen/builtins-ppc-vsx.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/builtins-ppc-vsx.c?rev=347556&r1=347555&r2=347556&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/builtins-ppc-vsx.c (original)
+++ cfe/trunk/test/CodeGen/builtins-ppc-vsx.c Mon Nov 26 06:35:38 2018
@@ -1638,51 +1638,51 @@ res_vsll = vec_slo(vsll, vsc);
// CHECK-LE: @llvm.ppc.altivec.vsro
res_vsll = vec_xl(sll, asll);
-// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
-// CHECK-LE: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 1
+// CHECK-LE: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 1
res_vull = vec_xl(sll, aull);
-// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
-// CHECK-LE: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 1
+// CHECK-LE: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 1
res_vd = vec_xl(sll, ad);
-// CHECK: load <2 x double>, <2 x double>* %{{[0-9]+}}, align 16
-// CHECK-LE: load <2 x double>, <2 x double>* %{{[0-9]+}}, align 16
+// CHECK: load <2 x double>, <2 x double>* %{{[0-9]+}}, align 1
+// CHECK-LE: load <2 x double>, <2 x double>* %{{[0-9]+}}, align 1
vec_xst(vsll, sll, asll);
-// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
-// CHECK-LE: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 1
+// CHECK-LE: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 1
vec_xst(vull, sll, aull);
-// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
-// CHECK-LE: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 1
+// CHECK-LE: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 1
vec_xst(vd, sll, ad);
-// CHECK: store <2 x double> %{{[0-9]+}}, <2 x double>* %{{[0-9]+}}, align 16
-// CHECK-LE: store <2 x double> %{{[0-9]+}}, <2 x double>* %{{[0-9]+}}, align 16
+// CHECK: store <2 x double> %{{[0-9]+}}, <2 x double>* %{{[0-9]+}}, align 1
+// CHECK-LE: store <2 x double> %{{[0-9]+}}, <2 x double>* %{{[0-9]+}}, align 1
res_vsll = vec_xl_be(sll, asll);
-// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
res_vull = vec_xl_be(sll, aull);
-// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
res_vd = vec_xl_be(sll, ad);
-// CHECK: load <2 x double>, <2 x double>* %{{[0-9]+}}, align 16
+// CHECK: load <2 x double>, <2 x double>* %{{[0-9]+}}, align 1
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}})
vec_xst_be(vsll, sll, asll);
-// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 1
// CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vull, sll, aull);
-// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 1
// CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}})
vec_xst_be(vd, sll, ad);
-// CHECK: store <2 x double> %{{[0-9]+}}, <2 x double>* %{{[0-9]+}}, align 16
+// CHECK: store <2 x double> %{{[0-9]+}}, <2 x double>* %{{[0-9]+}}, align 1
// CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}})
res_vf = vec_neg(vf);