[llvm-commits] [llvm] r101851 - in /llvm/trunk: include/llvm/IntrinsicsX86.td lib/Target/X86/X86InstrSSE.td lib/VMCore/AutoUpgrade.cpp test/Bitcode/ssse3_palignr.ll test/Bitcode/ssse3_palignr.ll.bc
Eric Christopher
echristo at apple.com
Mon Apr 19 17:59:54 PDT 2010
Author: echristo
Date: Mon Apr 19 19:59:54 2010
New Revision: 101851
URL: http://llvm.org/viewvc/llvm-project?rev=101851&view=rev
Log:
Remove the palignr intrinsics now that we lower them to vector shuffles,
shifts and null vectors. Autoupgrade these to what we'd lower them to.
Add a testcase to exercise this.
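As a rough illustration (not part of the commit itself), for the 64-bit
intrinsic with a shift of 8 bytes or less the upgrade turns the call into a
byte shuffle; the value names below are made up for the example:

  ; before: a call to the removed intrinsic
  %res = call <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64> %a, <1 x i64> %b, i8 7)

  ; after: roughly what AutoUpgrade now emits
  %t1 = bitcast <1 x i64> %b to <8 x i8>
  %t2 = bitcast <1 x i64> %a to <8 x i8>
  %palignr = shufflevector <8 x i8> %t1, <8 x i8> %t2,
               <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
  %res = bitcast <8 x i8> %palignr to <1 x i64>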
Added:
llvm/trunk/test/Bitcode/ssse3_palignr.ll
llvm/trunk/test/Bitcode/ssse3_palignr.ll.bc (with props)
Modified:
llvm/trunk/include/llvm/IntrinsicsX86.td
llvm/trunk/lib/Target/X86/X86InstrSSE.td
llvm/trunk/lib/VMCore/AutoUpgrade.cpp
Modified: llvm/trunk/include/llvm/IntrinsicsX86.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IntrinsicsX86.td?rev=101851&r1=101850&r2=101851&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IntrinsicsX86.td (original)
+++ llvm/trunk/include/llvm/IntrinsicsX86.td Mon Apr 19 19:59:54 2010
@@ -669,16 +669,6 @@
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
}
-// Align ops
-let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_ssse3_palign_r :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty,
- llvm_v1i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_ssse3_palign_r_128 :
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
-}
-
//===----------------------------------------------------------------------===//
// SSE4.1
Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=101851&r1=101850&r2=101851&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Mon Apr 19 19:59:54 2010
@@ -2939,14 +2939,8 @@
[]>, OpSize;
}
-def : Pat<(int_x86_ssse3_palign_r VR64:$src1, VR64:$src2, (i8 imm:$src3)),
- (PALIGNR64rr VR64:$src1, VR64:$src2, (BYTE_imm imm:$src3))>,
- Requires<[HasSSSE3]>;
-def : Pat<(int_x86_ssse3_palign_r VR64:$src1,
- (memop64 addr:$src2),
- (i8 imm:$src3)),
- (PALIGNR64rm VR64:$src1, addr:$src2, (BYTE_imm imm:$src3))>,
- Requires<[HasSSSE3]>;
+let AddedComplexity = 5 in {
+
def : Pat<(v1i64 (palign:$src3 VR64:$src1, VR64:$src2)),
(PALIGNR64rr VR64:$src2, VR64:$src1,
(SHUFFLE_get_palign_imm VR64:$src3))>,
@@ -2968,16 +2962,6 @@
(SHUFFLE_get_palign_imm VR64:$src3))>,
Requires<[HasSSSE3]>;
-def : Pat<(int_x86_ssse3_palign_r_128 VR128:$src1, VR128:$src2, (i8 imm:$src3)),
- (PALIGNR128rr VR128:$src1, VR128:$src2, (BYTE_imm imm:$src3))>,
- Requires<[HasSSSE3]>;
-def : Pat<(int_x86_ssse3_palign_r_128 VR128:$src1,
- (memopv2i64 addr:$src2),
- (i8 imm:$src3)),
- (PALIGNR128rm VR128:$src1, addr:$src2, (BYTE_imm imm:$src3))>,
- Requires<[HasSSSE3]>;
-
-let AddedComplexity = 5 in {
def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
(PALIGNR128rr VR128:$src2, VR128:$src1,
(SHUFFLE_get_palign_imm VR128:$src3))>,
Modified: llvm/trunk/lib/VMCore/AutoUpgrade.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/VMCore/AutoUpgrade.cpp?rev=101851&r1=101850&r2=101851&view=diff
==============================================================================
--- llvm/trunk/lib/VMCore/AutoUpgrade.cpp (original)
+++ llvm/trunk/lib/VMCore/AutoUpgrade.cpp Mon Apr 19 19:59:54 2010
@@ -19,6 +19,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/IRBuilder.h"
#include <cstring>
using namespace llvm;
@@ -277,8 +278,13 @@
// Calls to these intrinsics are transformed into vector multiplies.
NewFn = 0;
return true;
+ } else if (Name.compare(5, 18, "x86.ssse3.palign.r", 18) == 0 ||
+ Name.compare(5, 22, "x86.ssse3.palign.r.128", 22) == 0) {
+ // Calls to these intrinsics are transformed into vector shuffles, shifts,
+ // or 0.
+ NewFn = 0;
+ return true;
}
-
break;
}
@@ -420,6 +426,118 @@
// Remove upgraded multiply.
CI->eraseFromParent();
+ } else if (F->getName() == "llvm.x86.ssse3.palign.r") {
+ Value *Op1 = CI->getOperand(1);
+ Value *Op2 = CI->getOperand(2);
+ Value *Op3 = CI->getOperand(3);
+ unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
+ Value *Rep;
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+
+ // If palignr is shifting the pair of input vectors less than 9 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 8) {
+ const Type *IntTy = Type::getInt32Ty(C);
+ const Type *EltTy = Type::getInt8Ty(C);
+ const Type *VecTy = VectorType::get(EltTy, 8);
+
+ Op2 = Builder.CreateBitCast(Op2, VecTy);
+ Op1 = Builder.CreateBitCast(Op1, VecTy);
+
+ llvm::SmallVector<llvm::Constant*, 8> Indices;
+ for (unsigned i = 0; i != 8; ++i)
+ Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
+
+ Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
+ Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
+ Rep = Builder.CreateBitCast(Rep, F->getReturnType());
+ }
+
+ // If palignr is shifting the pair of input vectors more than 8 but less
+ // than 16 bytes, emit a logical right shift of the destination.
+ else if (shiftVal < 16) {
+ // MMX has these as 1 x i64 vectors for some odd optimization reasons.
+ const Type *EltTy = Type::getInt64Ty(C);
+ const Type *VecTy = VectorType::get(EltTy, 1);
+
+ Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
+ Op2 = ConstantInt::get(VecTy, (shiftVal-8) * 8);
+
+ // Shift right by (shiftVal-8)*8 bits via the MMX psrl.q intrinsic.
+ Function *I =
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_mmx_psrl_q);
+ Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
+ }
+
+ // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
+ else {
+ Rep = Constant::getNullValue(F->getReturnType());
+ }
+
+ // Replace any uses with our new instruction.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(Rep);
+
+ // Remove upgraded instruction.
+ CI->eraseFromParent();
+
+ } else if (F->getName() == "llvm.x86.ssse3.palign.r.128") {
+ Value *Op1 = CI->getOperand(1);
+ Value *Op2 = CI->getOperand(2);
+ Value *Op3 = CI->getOperand(3);
+ unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
+ Value *Rep;
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+
+ // If palignr is shifting the pair of input vectors less than 17 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 16) {
+ const Type *IntTy = Type::getInt32Ty(C);
+ const Type *EltTy = Type::getInt8Ty(C);
+ const Type *VecTy = VectorType::get(EltTy, 16);
+
+ Op2 = Builder.CreateBitCast(Op2, VecTy);
+ Op1 = Builder.CreateBitCast(Op1, VecTy);
+
+ llvm::SmallVector<llvm::Constant*, 16> Indices;
+ for (unsigned i = 0; i != 16; ++i)
+ Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
+
+ Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
+ Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
+ Rep = Builder.CreateBitCast(Rep, F->getReturnType());
+ }
+
+ // If palignr is shifting the pair of input vectors more than 16 but less
+ // than 32 bytes, emit a logical right shift of the destination.
+ else if (shiftVal < 32) {
+ const Type *EltTy = Type::getInt64Ty(C);
+ const Type *VecTy = VectorType::get(EltTy, 2);
+ const Type *IntTy = Type::getInt32Ty(C);
+
+ Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
+ Op2 = ConstantInt::get(IntTy, (shiftVal-16) * 8);
+
+ // Shift right by (shiftVal-16)*8 bits via the SSE2 psrl.dq intrinsic.
+ Function *I =
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_sse2_psrl_dq);
+ Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
+ }
+
+ // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
+ else {
+ Rep = Constant::getNullValue(F->getReturnType());
+ }
+
+ // Replace any uses with our new instruction.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(Rep);
+
+ // Remove upgraded instruction.
+ CI->eraseFromParent();
+
} else {
llvm_unreachable("Unknown function for CallInst upgrade.");
}
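To sketch the remaining 128-bit cases in IR terms (illustrative only; the
value names are invented, and the shift amount must be a constant as the
code above requires): a shift of 17-31 bytes becomes a logical right shift
of the first operand, and a shift of 32 bytes or more folds to zero.

  ; before: shift of 20 bytes
  %r = call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %a, <2 x i64> %b, i8 20)
  ; after: logical right shift of %a by (20-16)*8 = 32 bits
  %r = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a, i32 32)

  ; a shift of 32 bytes or more leaves no input bytes, so uses of the call
  ; are simply replaced with <2 x i64> zeroinitializer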
Added: llvm/trunk/test/Bitcode/ssse3_palignr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Bitcode/ssse3_palignr.ll?rev=101851&view=auto
==============================================================================
--- llvm/trunk/test/Bitcode/ssse3_palignr.ll (added)
+++ llvm/trunk/test/Bitcode/ssse3_palignr.ll Mon Apr 19 19:59:54 2010
@@ -0,0 +1 @@
+; RUN: llvm-dis < %s.bc | not grep {@llvm\\.palign}
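The companion bitcode file is binary, so its source does not appear in the
diff; a plausible input (a reconstruction for illustration, not necessarily
the actual contents of ssse3_palignr.ll.bc) would look something like:

  define <2 x i64> @test(<2 x i64> %a, <2 x i64> %b) nounwind {
  entry:
    %0 = call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %a, <2 x i64> %b, i8 15)
    ret <2 x i64> %0
  }

  declare <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64>, <2 x i64>, i8) nounwind readnone

After autoupgrade, llvm-dis emits a shuffle (or shift, or zero) in place of
the intrinsic call, so the disassembly should contain no palign intrinsic
for the grep to find.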
Added: llvm/trunk/test/Bitcode/ssse3_palignr.ll.bc
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Bitcode/ssse3_palignr.ll.bc?rev=101851&view=auto
==============================================================================
Binary file - no diff available.
Propchange: llvm/trunk/test/Bitcode/ssse3_palignr.ll.bc
------------------------------------------------------------------------------
svn:mime-type = application/octet-stream