[llvm] r271677 - [AArch64] Spot SBFX-compatible code expressed with sign_extend_inreg.

Chad Rosier via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 3 08:00:10 PDT 2016


Author: mcrosier
Date: Fri Jun  3 10:00:09 2016
New Revision: 271677

URL: http://llvm.org/viewvc/llvm-project?rev=271677&view=rev
Log:
[AArch64] Spot SBFX-compatible code expressed with sign_extend_inreg.

We were assuming all SBFX-like operations would have the shl/asr form, but often
when the field being extracted is an i8 or i16, we end up with a
SIGN_EXTEND_INREG acting on a shift instead.

This is a port of r213754 from ARM to AArch64.
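For illustration, the shape in question arises when a sign-extended narrow field
is taken from a shifted value: instead of an shl/asr pair, the DAG contains
(sign_extend_inreg (sra/srl X, C), iN). A hypothetical C++-level equivalent,
roughly corresponding to test9 added below, is:

    #include <cstdint>

    // Extract a signed 8-bit field starting at bit 23 of a 32-bit value.
    // Conceptually the DAG is (sign_extend_inreg (sra a, 23), i8), which this
    // patch now selects to a single SBFX (test9 checks: sbfx w0, w0, #23, #8).
    int32_t extract_field(int32_t a) {
      return static_cast<int8_t>(a >> 23);
    }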

Modified:
    llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
    llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll

Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp?rev=271677&r1=271676&r2=271677&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp Fri Jun  3 10:00:09 2016
@@ -1505,6 +1505,39 @@ static bool isBitfieldExtractOpFromAnd(S
   return true;
 }
 
+static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
+                                             SDValue &Opd0, unsigned &Immr,
+                                             unsigned &Imms) {
+  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
+
+  EVT VT = N->getValueType(0);
+  unsigned BitWidth = VT.getSizeInBits();
+  assert((VT == MVT::i32 || VT == MVT::i64) &&
+         "Type checking must have been done before calling this function");
+
+  SDValue Op = N->getOperand(0);
+  if (Op->getOpcode() == ISD::TRUNCATE) {
+    Op = Op->getOperand(0);
+    VT = Op->getValueType(0);
+    BitWidth = VT.getSizeInBits();
+  }
+
+  uint64_t ShiftImm;
+  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRL, ShiftImm) &&
+      !isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
+    return false;
+
+  unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
+  if (ShiftImm + Width > BitWidth)
+    return false;
+
+  Opc = (VT == MVT::i32) ? AArch64::SBFMWri : AArch64::SBFMXri;
+  Opd0 = Op.getOperand(0);
+  Immr = ShiftImm;
+  Imms = ShiftImm + Width - 1;
+  return true;
+}
+
 static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
                                           SDValue &Opd0, unsigned &LSB,
                                           unsigned &MSB) {
@@ -1635,6 +1668,9 @@ static bool isBitfieldExtractOp(Selectio
   case ISD::SRL:
   case ISD::SRA:
     return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
+
+  case ISD::SIGN_EXTEND_INREG:
+    return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms);
   }
 
   unsigned NOpc = N->getMachineOpcode();
@@ -2545,6 +2581,7 @@ void AArch64DAGToDAGISel::Select(SDNode
   case ISD::SRL:
   case ISD::AND:
   case ISD::SRA:
+  case ISD::SIGN_EXTEND_INREG:
     if (tryBitfieldExtractOp(Node))
       return;
     if (tryBitfieldInsertInZeroOp(Node))

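As background for the Immr/Imms values above: SBFX Rd, Rn, #lsb, #width is an
alias of SBFM with immr = lsb and imms = lsb + width - 1 (the test comment
below makes the same point for SXTH). A minimal standalone sketch of that
arithmetic, for illustration only and not part of the patch:

    #include <cstdio>

    // Mirrors the computation in isBitfieldExtractOpFromSExtInReg: the field
    // starts at bit ShiftImm and is Width bits wide, so immr = ShiftImm and
    // imms = ShiftImm + Width - 1.  Returns false when the field would run
    // past the register, matching the "ShiftImm + Width > BitWidth" bail-out.
    static bool sbfmImmediates(unsigned ShiftImm, unsigned Width,
                               unsigned BitWidth, unsigned &Immr,
                               unsigned &Imms) {
      if (ShiftImm + Width > BitWidth)
        return false;
      Immr = ShiftImm;
      Imms = ShiftImm + Width - 1;
      return true;
    }

    int main() {
      unsigned Immr, Imms;
      // The case from test9 below: an i8 field at bit 23 of an i32.
      if (sbfmImmediates(23, 8, 32, Immr, Imms))
        std::printf("immr=%u imms=%u\n", Immr, Imms); // immr=23 imms=30
      return 0;
    }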
Modified: llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll?rev=271677&r1=271676&r2=271677&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll Fri Jun  3 10:00:09 2016
@@ -463,3 +463,91 @@ define i64 @test8(i64 %a) {
   %2 = or i64 %1, 157601565442048     ; 0x00008f5679530000
   ret i64 %2
 }
+
+; CHECK-LABEL: @test9
+; CHECK: sbfx w0, w0, #23, #8
+define signext i8 @test9(i32 %a) {
+  %tmp = ashr i32 %a, 23
+  %res = trunc i32 %tmp to i8
+  ret i8 %res
+}
+
+; CHECK-LABEL: @test10
+; CHECK: sbfx w0, w0, #23, #8
+define signext i8 @test10(i32 %a) {
+  %tmp = lshr i32 %a, 23
+  %res = trunc i32 %tmp to i8
+  ret i8 %res
+}
+
+; CHECK-LABEL: @test11
+; CHECK: sbfx w0, w0, #15, #16
+define signext i16 @test11(i32 %a) {
+  %tmp = lshr i32 %a, 15
+  %res = trunc i32 %tmp to i16
+  ret i16 %res
+}
+
+; CHECK-LABEL: @test12
+; CHECK: sbfx w0, w0, #16, #8
+define signext i8 @test12(i64 %a) {
+  %tmp = lshr i64 %a, 16
+  %res = trunc i64 %tmp to i8
+  ret i8 %res
+}
+
+; CHECK-LABEL: @test13
+; CHECK: sbfx x0, x0, #30, #8
+define signext i8 @test13(i64 %a) {
+  %tmp = lshr i64 %a, 30
+  %res = trunc i64 %tmp to i8
+  ret i8 %res
+}
+
+; CHECK-LABEL: @test14
+; CHECK: sbfx x0, x0, #23, #16
+define signext i16 @test14(i64 %a) {
+  %tmp = lshr i64 %a, 23
+  %res = trunc i64 %tmp to i16
+  ret i16 %res
+}
+
+; CHECK-LABEL: @test15
+; CHECK: asr w0, w0, #25
+define signext i8 @test15(i32 %a) {
+  %tmp = ashr i32 %a, 25
+  %res = trunc i32 %tmp to i8
+  ret i8 %res
+}
+
+; CHECK-LABEL: @test16
+; CHECK: lsr w0, w0, #25
+define signext i8 @test16(i32 %a) {
+  %tmp = lshr i32 %a, 25
+  %res = trunc i32 %tmp to i8
+  ret i8 %res
+}
+
+; CHECK-LABEL: @test17
+; CHECK: lsr x0, x0, #49
+define signext i16 @test17(i64 %a) {
+  %tmp = lshr i64 %a, 49
+  %res = trunc i64 %tmp to i16
+  ret i16 %res
+}
+
+; Forming the SBFX even though the SHR has multiple uses is fine: SXTH and
+; SBFX are both aliases of SBFM, so nothing is lost, and the SHR and SBFX can
+; then execute in parallel.
+;
+; CHECK-LABEL: @test18
+; CHECK: lsr x1, x0, #23
+; CHECK: sbfx x0, x0, #23, #16
+define void @test18(i64 %a) {
+  %tmp = lshr i64 %a, 23
+  %res = trunc i64 %tmp to i16
+  call void @use(i16 %res, i64 %tmp)
+  ret void
+}
+
+declare void @use(i16 signext, i64)
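A note on the negative tests (test15 through test17): there the requested field
runs past the end of the register (for test15, 25 + 8 > 32), so the new code
bails out and the plain shift is kept; as the CHECK lines show, the shift alone
already produces a correctly extended result. A hypothetical C++ equivalent of
test15, for illustration only:

    #include <cstdint>

    // Roughly test15: after asr #25 the upper bits of the result are already
    // copies of the sign bit, so sign-extending the low 8 bits is a no-op and
    // no sbfx is formed (25 + 8 > 32 trips the bail-out in the patch).
    int32_t high_field(int32_t a) {
      return static_cast<int8_t>(a >> 25);
    }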



