[llvm] r205209 - ARM64: add extra patterns for scalar shifts
Tim Northover
tnorthover at apple.com
Mon Mar 31 08:46:46 PDT 2014
Author: tnorthover
Date: Mon Mar 31 10:46:46 2014
New Revision: 205209
URL: http://llvm.org/viewvc/llvm-project?rev=205209&view=rev
Log:
ARM64: add extra patterns for scalar shifts

The SIMDScalarRShiftD and SIMDScalarRShiftDTied multiclasses now express
their main patterns in terms of i64 FPR64 operands and add separate Pat
entries for the v1i64 forms, so <1 x i64> right shifts and
shift-right-accumulate operations can also select the scalar d-register
instructions.
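As a minimal illustration (mirroring the tests added to vshr.ll below; the
function name is only for the example), IR of this shape is expected to
lower to the scalar d-form shift:

  define <1 x i64> @shift_example(<1 x i64> %v) nounwind {
    %s = ashr <1 x i64> %v, <i64 63>
    ret <1 x i64> %s
  }
  ; expected selection: sshr d0, d0, #63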
Modified:
llvm/trunk/lib/Target/ARM64/ARM64InstrFormats.td
llvm/trunk/test/CodeGen/ARM64/vshr.ll
llvm/trunk/test/CodeGen/ARM64/vsra.ll
Modified: llvm/trunk/lib/Target/ARM64/ARM64InstrFormats.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM64/ARM64InstrFormats.td?rev=205209&r1=205208&r2=205209&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM64/ARM64InstrFormats.td (original)
+++ llvm/trunk/lib/Target/ARM64/ARM64InstrFormats.td Mon Mar 31 10:46:46 2014
@@ -6651,22 +6651,28 @@ multiclass SIMDScalarRShiftD<bit U, bits
SDPatternOperator OpNode> {
def d : BaseSIMDScalarShift<U, opc, {1,?,?,?,?,?,?},
FPR64, FPR64, vecshiftR64, asm,
- [(set (v1i64 FPR64:$Rd),
- (OpNode (v1i64 FPR64:$Rn), (i32 vecshiftR64:$imm)))]> {
+ [(set (i64 FPR64:$Rd),
+ (OpNode (i64 FPR64:$Rn), (i32 vecshiftR64:$imm)))]> {
let Inst{21-16} = imm{5-0};
}
+
+ def : Pat<(v1i64 (OpNode (v1i64 FPR64:$Rn), (i32 vecshiftR64:$imm))),
+ (!cast<Instruction>(NAME # "d") FPR64:$Rn, vecshiftR64:$imm)>;
}
-let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
multiclass SIMDScalarRShiftDTied<bit U, bits<5> opc, string asm,
SDPatternOperator OpNode = null_frag> {
def d : BaseSIMDScalarShiftTied<U, opc, {1,?,?,?,?,?,?},
FPR64, FPR64, vecshiftR64, asm,
- [(set (v1i64 FPR64:$dst),
- (OpNode (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
- (i32 vecshiftR64:$imm)))]> {
+ [(set (i64 FPR64:$dst), (OpNode (i64 FPR64:$Rd), (i64 FPR64:$Rn),
+ (i32 vecshiftR64:$imm)))]> {
let Inst{21-16} = imm{5-0};
}
+
+ def : Pat<(v1i64 (OpNode (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
+ (i32 vecshiftR64:$imm))),
+ (!cast<Instruction>(NAME # "d") FPR64:$Rd, FPR64:$Rn,
+ vecshiftR64:$imm)>;
}
multiclass SIMDScalarLShiftD<bit U, bits<5> opc, string asm,
Modified: llvm/trunk/test/CodeGen/ARM64/vshr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM64/vshr.ll?rev=205209&r1=205208&r2=205209&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM64/vshr.ll (original)
+++ llvm/trunk/test/CodeGen/ARM64/vshr.ll Mon Mar 31 10:46:46 2014
@@ -46,4 +46,18 @@ entry:
ret <8 x i16> %shr
}
+define <1 x i64> @sshr_v1i64(<1 x i64> %A) nounwind {
+; CHECK-LABEL: sshr_v1i64:
+; CHECK: sshr d0, d0, #63
+ %tmp3 = ashr <1 x i64> %A, < i64 63 >
+ ret <1 x i64> %tmp3
+}
+
+define <1 x i64> @ushr_v1i64(<1 x i64> %A) nounwind {
+; CHECK-LABEL: ushr_v1i64:
+; CHECK: ushr d0, d0, #63
+ %tmp3 = lshr <1 x i64> %A, < i64 63 >
+ ret <1 x i64> %tmp3
+}
+
attributes #0 = { nounwind }
Modified: llvm/trunk/test/CodeGen/ARM64/vsra.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM64/vsra.ll?rev=205209&r1=205208&r2=205209&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM64/vsra.ll (original)
+++ llvm/trunk/test/CodeGen/ARM64/vsra.ll Mon Mar 31 10:46:46 2014
@@ -140,3 +140,11 @@ define <2 x i64> @vsraQu64(<2 x i64>* %A
%tmp4 = add <2 x i64> %tmp1, %tmp3
ret <2 x i64> %tmp4
}
+
+define <1 x i64> @vsra_v1i64(<1 x i64> %A, <1 x i64> %B) nounwind {
+; CHECK-LABEL: vsra_v1i64:
+; CHECK: ssra d0, d1, #63
+ %tmp3 = ashr <1 x i64> %B, < i64 63 >
+ %tmp4 = add <1 x i64> %A, %tmp3
+ ret <1 x i64> %tmp4
+}