[cfe-commits] r101332 - in /cfe/trunk: include/clang/Basic/BuiltinsX86.def lib/CodeGen/CGBuiltin.cpp lib/Headers/tmmintrin.h test/CodeGen/palignr.c
Eric Christopher
echristo at apple.com
Wed Apr 14 18:43:09 PDT 2010
Author: echristo
Date: Wed Apr 14 20:43:08 2010
New Revision: 101332
URL: http://llvm.org/viewvc/llvm-project?rev=101332&view=rev
Log:
Rewrite handling of the 64-bit palignr intrinsic to emit vector shuffles.
Accordingly, stop multiplying the shift constant by 8 in the header, and
change the builtin definition to reflect the types we now expect.
Extend the existing palignr test to check that we emit the right code.
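
For context, 64-bit palignr concatenates its two 8-byte operands (first
operand in the high half) and extracts 8 bytes starting at the byte offset
given by the immediate; bytes read from past the 16-byte pair are zero. A
minimal reference model of those semantics, written here purely for
illustration (not part of the commit):

    #include <string.h>

    /* Illustrative model of 64-bit palignr(a, b, n): form the 16-byte
       value b (low half) : a (high half), then take 8 bytes starting at
       byte n. Offsets at or beyond 16 yield zero bytes. */
    static void palignr64_ref(unsigned char dst[8],
                              const unsigned char a[8],
                              const unsigned char b[8],
                              unsigned n) {
      unsigned char concat[16];
      unsigned i;
      memcpy(concat, b, 8);     /* bytes 0..7  come from b */
      memcpy(concat + 8, a, 8); /* bytes 8..15 come from a */
      for (i = 0; i != 8; ++i)
        dst[i] = (n + i < 16) ? concat[n + i] : 0;
    }

With that picture, the three cases in the CGBuiltin.cpp change below fall
out directly: n <= 8 stays a shuffle over the pair, 8 < n < 16 reads only
from the first operand, and n >= 16 is all zeros.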
Modified:
cfe/trunk/include/clang/Basic/BuiltinsX86.def
cfe/trunk/lib/CodeGen/CGBuiltin.cpp
cfe/trunk/lib/Headers/tmmintrin.h
cfe/trunk/test/CodeGen/palignr.c
Modified: cfe/trunk/include/clang/Basic/BuiltinsX86.def
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/BuiltinsX86.def?rev=101332&r1=101331&r2=101332&view=diff
==============================================================================
--- cfe/trunk/include/clang/Basic/BuiltinsX86.def (original)
+++ cfe/trunk/include/clang/Basic/BuiltinsX86.def Wed Apr 14 20:43:08 2010
@@ -245,7 +245,7 @@
BUILTIN(__builtin_ia32_mwait, "vUiUi", "")
BUILTIN(__builtin_ia32_lddqu, "V16ccC*", "")
BUILTIN(__builtin_ia32_palignr128, "V16cV16cV16cc", "")
-BUILTIN(__builtin_ia32_palignr, "V1LLiV1LLiV1LLic", "")
+BUILTIN(__builtin_ia32_palignr, "V8cV8cV8cc", "")
BUILTIN(__builtin_ia32_insertps128, "V4fV4fV4fi", "")
BUILTIN(__builtin_ia32_storelv4si, "vV2i*V2LLi", "")
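
Decoding the signature string in the new line above: each "V8c" is a
vector of eight chars and the trailing "c" is the shift immediate, so the
builtin now corresponds roughly to the following C-level prototype. This
is only an illustration of the encoding; the builtin is provided by the
compiler and is not something you would declare yourself:

    typedef char v8qi __attribute__((vector_size(8)));

    /* Approximate prototype implied by "V8cV8cV8cc"; descriptive only. */
    v8qi __builtin_ia32_palignr(v8qi a, v8qi b, char n);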
Modified: cfe/trunk/lib/CodeGen/CGBuiltin.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGBuiltin.cpp?rev=101332&r1=101331&r2=101332&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGBuiltin.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGBuiltin.cpp Wed Apr 14 20:43:08 2010
@@ -982,8 +982,38 @@
return Builder.CreateStore(Ops[1], Ops[0]);
}
case X86::BI__builtin_ia32_palignr: {
- Function *F = CGM.getIntrinsic(Intrinsic::x86_ssse3_palign_r);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size());
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ // If palignr is shifting the pair of input vectors by fewer than 9 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 8) {
+ const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+
+ llvm::SmallVector<llvm::Constant*, 8> Indices;
+ for (unsigned i = 0; i != 8; ++i)
+ Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+
+ Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+
+ // If palignr is shifting the pair of input vectors by more than 8 but
+ // fewer than 16 bytes, emit a logical right shift of the destination.
+ if (shiftVal < 16) {
+ // MMX has these as 1 x i64 vectors for some odd optimization reasons.
+ const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
+ const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 1);
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
+
+ // Emit the shift as a call to the MMX psrl.q intrinsic.
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+ }
+
+ // If palignr is shifting the pair of vectors by 16 bytes or more, emit zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
case X86::BI__builtin_ia32_palignr128: {
unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
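
One detail worth spelling out in the middle case above: once the shift
exceeds 8 bytes, every surviving byte comes from the first operand alone,
so on little-endian x86 the operation collapses to a plain 64-bit logical
right shift by (shiftVal - 8) * 8 bits, which is exactly what the psrl.q
call produces. A small hand-written check of that equivalence, again for
illustration only:

    #include <assert.h>
    #include <stdint.h>

    /* For 8 < n < 16, palignr(a, b, n) no longer touches b: viewing a as
       a little-endian uint64_t, the result is a logical right shift by
       (n - 8) * 8 bits. */
    static uint64_t palignr64_mid(uint64_t a, unsigned n) {
      assert(n > 8 && n < 16);
      return a >> ((n - 8) * 8);
    }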
Modified: cfe/trunk/lib/Headers/tmmintrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/tmmintrin.h?rev=101332&r1=101331&r2=101332&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/tmmintrin.h (original)
+++ cfe/trunk/lib/Headers/tmmintrin.h Wed Apr 14 20:43:08 2010
@@ -67,7 +67,7 @@
}
#define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
-#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n*8)))
+#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n)))
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hadd_epi16(__m128i a, __m128i b)
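
Since the macro no longer scales n, callers keep passing a byte count and
it now reaches the builtin unchanged. A usage sketch (function name
hypothetical; assumes an SSSE3-enabled build, e.g. -mssse3):

    #include <tmmintrin.h>

    /* Align the pair hi:lo right by three bytes; 3 is a byte count, not
       a bit count, and must be a compile-time constant. */
    static __m64 align_by_3(__m64 hi, __m64 lo) {
      return _mm_alignr_pi8(hi, lo, 3);
    }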
Modified: cfe/trunk/test/CodeGen/palignr.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/palignr.c?rev=101332&r1=101331&r2=101332&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/palignr.c (original)
+++ cfe/trunk/test/CodeGen/palignr.c Wed Apr 14 20:43:08 2010
@@ -1,13 +1,9 @@
// RUN: %clang_cc1 %s -triple=i686-apple-darwin -target-feature +ssse3 -O1 -S -o - | FileCheck %s
#define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
-#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n*8)))
-typedef __attribute__((vector_size(8))) int int2;
typedef __attribute__((vector_size(16))) int int4;
// CHECK: palignr
-int2 mmx_align1(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 7); }
-// CHECK: palignr
int4 align1(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 15); }
// CHECK: ret
// CHECK: ret
@@ -17,3 +13,18 @@
int4 align3(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 17); }
// CHECK: xor
int4 align4(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 32); }
+
+#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n)))
+typedef __attribute__((vector_size(8))) int int2;
+
+// CHECK-NOT: palignr
+int2 align5(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 8); }
+
+// CHECK: psrlq
+int2 align6(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 9); }
+
+// CHECK: xor
+int2 align7(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 16); }
+
+// CHECK: palignr
+int2 align8(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 7); }
\ No newline at end of file