[compiler-rt] r202713 - [msan] Tests for X86 SIMD bitshift intrinsic support.
Evgeniy Stepanov
eugeni.stepanov at gmail.com
Mon Mar 3 05:52:36 PST 2014
Author: eugenis
Date: Mon Mar 3 07:52:36 2014
New Revision: 202713
URL: http://llvm.org/viewvc/llvm-project?rev=202713&view=rev
Log:
[msan] Tests for X86 SIMD bitshift intrinsic support.
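The tests below cover both the SSE2 shifts that take a single count (_mm_slli_epi16 with a scalar count, _mm_sll_epi16 with the count in the low 64 bits of a vector operand) and the AVX2 per-element variable shift (_mm_sllv_epi32). The shadow-propagation rule they exercise is, roughly: shifting a value shifts its shadow by the same amount, and an uninitialized shift count poisons the entire result. A minimal scalar sketch of that rule, for illustration only (ShadowedU16 and ModelShl are hypothetical names, not part of MSan or of this test file):

    #include <cstdint>

    // Hypothetical scalar model of the rule under test, for illustration.
    struct ShadowedU16 {
      uint16_t val;     // the data bits
      uint16_t shadow;  // 1-bits mark uninitialized (poisoned) bit positions
    };

    // Left shift by a possibly-uninitialized count: the shadow is shifted
    // by the same amount as the value, and an uninitialized count poisons
    // the whole result, matching the EXPECT_POISONED checks in the tests.
    ShadowedU16 ModelShl(ShadowedU16 x, unsigned count, bool count_poisoned) {
      ShadowedU16 r;
      r.val = static_cast<uint16_t>(x.val << count);
      r.shadow = count_poisoned ? uint16_t(0xFFFF)
                                : static_cast<uint16_t>(x.shadow << count);
      return r;
    }

In the vector case the same rule applies lane-wise, except that _mm_sll_epi16 reads its count only from the low 64 bits of the count operand, which is why the tests can leave the high half of the count vector poisoned without poisoning the result.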
Modified:
compiler-rt/trunk/lib/msan/tests/msan_test.cc
Modified: compiler-rt/trunk/lib/msan/tests/msan_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/tests/msan_test.cc?rev=202713&r1=202712&r2=202713&view=diff
==============================================================================
--- compiler-rt/trunk/lib/msan/tests/msan_test.cc (original)
+++ compiler-rt/trunk/lib/msan/tests/msan_test.cc Mon Mar 3 07:52:36 2014
@@ -62,6 +62,10 @@
# define MSAN_HAS_M128 0
#endif
+#ifdef __AVX2__
+# include <immintrin.h>
+#endif
+
static const int kPageSize = 4096;
typedef unsigned char U1;
@@ -3272,6 +3276,98 @@ TEST(MemorySanitizer, UnalignedStore64)
EXPECT_POISONED_O(x[11], origin);
}
+namespace {
+typedef U2 V8x16 __attribute__((__vector_size__(16)));
+typedef U4 V4x32 __attribute__((__vector_size__(16)));
+typedef U8 V2x64 __attribute__((__vector_size__(16)));
+typedef U4 V8x32 __attribute__((__vector_size__(32)));
+typedef U8 V4x64 __attribute__((__vector_size__(32)));
+
+V8x16 shift_sse2_left_scalar(V8x16 x, U4 y) {
+ return _mm_slli_epi16(x, y);
+}
+
+V8x16 shift_sse2_left(V8x16 x, V8x16 y) {
+ return _mm_sll_epi16(x, y);
+}
+
+TEST(VectorShiftTest, sse2_left_scalar) {
+ V8x16 v = {(U2)(*GetPoisoned<U2>() | 3), (U2)(*GetPoisoned<U2>() | 7), 2, 3,
+ 4, 5, 6, 7};
+ V8x16 u = shift_sse2_left_scalar(v, 2);
+ EXPECT_POISONED(u[0]);
+ EXPECT_POISONED(u[1]);
+ EXPECT_NOT_POISONED(u[0] | (~7U));
+ EXPECT_NOT_POISONED(u[1] | (~31U));
+ u[0] = u[1] = 0;
+ EXPECT_NOT_POISONED(u);
+}
+
+TEST(VectorShiftTest, sse2_left_scalar_by_uninit) {
+ V8x16 v = {0, 1, 2, 3, 4, 5, 6, 7};
+ V8x16 u = shift_sse2_left_scalar(v, *GetPoisoned<U4>());
+ EXPECT_POISONED(u[0]);
+ EXPECT_POISONED(u[1]);
+ EXPECT_POISONED(u[2]);
+ EXPECT_POISONED(u[3]);
+ EXPECT_POISONED(u[4]);
+ EXPECT_POISONED(u[5]);
+ EXPECT_POISONED(u[6]);
+ EXPECT_POISONED(u[7]);
+}
+
+TEST(VectorShiftTest, sse2_left) {
+ V8x16 v = {(U2)(*GetPoisoned<U2>() | 3), (U2)(*GetPoisoned<U2>() | 7), 2, 3,
+ 4, 5, 6, 7};
+ // Top 64 bits of shift count don't affect the result.
+ V2x64 s = {2, *GetPoisoned<U8>()};
+ V8x16 u = shift_sse2_left(v, s);
+ EXPECT_POISONED(u[0]);
+ EXPECT_POISONED(u[1]);
+ EXPECT_NOT_POISONED(u[0] | (~7U));
+ EXPECT_NOT_POISONED(u[1] | (~31U));
+ u[0] = u[1] = 0;
+ EXPECT_NOT_POISONED(u);
+}
+
+TEST(VectorShiftTest, sse2_left_by_uninit) {
+ V8x16 v = {(U2)(*GetPoisoned<U2>() | 3), (U2)(*GetPoisoned<U2>() | 7), 2, 3,
+ 4, 5, 6, 7};
+ V2x64 s = {*GetPoisoned<U8>(), *GetPoisoned<U8>()};
+ V8x16 u = shift_sse2_left(v, s);
+ EXPECT_POISONED(u[0]);
+ EXPECT_POISONED(u[1]);
+ EXPECT_POISONED(u[2]);
+ EXPECT_POISONED(u[3]);
+ EXPECT_POISONED(u[4]);
+ EXPECT_POISONED(u[5]);
+ EXPECT_POISONED(u[6]);
+ EXPECT_POISONED(u[7]);
+}
+
+#ifdef __AVX2__
+// Variable (per-element) vector shift, only available starting with AVX2.
+V4x32 shift_avx2_left(V4x32 x, V4x32 y) {
+  return _mm_sllv_epi32(x, y);
+}
+
+TEST(VectorShiftTest, avx2_left) {
+ V4x32 v = {(U2)(*GetPoisoned<U2>() | 3), (U2)(*GetPoisoned<U2>() | 7), 2, 3};
+ V4x32 s = {2, *GetPoisoned<U4>(), 3, *GetPoisoned<U4>()};
+ V4x32 u = shift_avx2_left(v, s);
+ EXPECT_POISONED(u[0]);
+ EXPECT_NOT_POISONED(u[0] | (~7U));
+ EXPECT_POISONED(u[1]);
+ EXPECT_POISONED(u[1] | (~31U));
+ EXPECT_NOT_POISONED(u[2]);
+ EXPECT_POISONED(u[3]);
+ EXPECT_POISONED(u[3] | (~31U));
+}
+#endif // __AVX2__
+} // namespace
+
+
TEST(MemorySanitizerDr, StoreInDSOTest) {
if (!__msan_has_dynamic_component()) return;
char* s = new char[10];
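A note on the masking idiom in these tests: ORing a partially poisoned value with a constant makes every 1-bit position defined, so EXPECT_NOT_POISONED(u[0] | (~7U)) passes exactly when bits 0-2 of u[0] are initialized. A small scalar illustration using the same GetPoisoned and EXPECT_* helpers from msan_test.cc (the constants mirror the first vector test and are chosen only for illustration):

    // (*GetPoisoned<U2>() | 3) has two defined low bits; shifting left by 2
    // leaves bits 0-3 defined and moves the poison up to bits 4 and above.
    U2 x = (U2)((*GetPoisoned<U2>() | 3) << 2);
    EXPECT_POISONED(x);              // bits 4+ are uninitialized
    EXPECT_NOT_POISONED(x | (~7U));  // OR defines bits 3+; bits 0-2 are clean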