[llvm] r205925 - [AArch64] Implement the isTruncateFree API.

Chad Rosier mcrosier at codeaurora.org
Wed Apr 9 13:43:41 PDT 2014


Author: mcrosier
Date: Wed Apr  9 15:43:40 2014
New Revision: 205925

URL: http://llvm.org/viewvc/llvm-project?rev=205925&view=rev
Log:
[AArch64] Implement the isTruncateFree API.

In AArch64 i64 to i32 truncate operation is a subregister access.

This allows more opportunities for LSR optimization to eliminate
variables of different types (i32 and i64).

Added:
    llvm/trunk/test/CodeGen/AArch64/eliminate-trunc.ll
Modified:
    llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h

Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=205925&r1=205924&r2=205925&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Wed Apr  9 15:43:40 2014
@@ -5368,3 +5368,24 @@ bool AArch64TargetLowering::getTgtMemInt
 
   return false;
 }
+
+// Truncations from 64-bit GPR to 32-bit GPR is free.
+bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
+  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
+    return false;
+  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
+  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
+  if (NumBits1 <= NumBits2)
+    return false;
+  return true;
+}
+
+bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
+  if (!VT1.isInteger() || !VT2.isInteger())
+    return false;
+  unsigned NumBits1 = VT1.getSizeInBits();
+  unsigned NumBits2 = VT2.getSizeInBits();
+  if (NumBits1 <= NumBits2)
+    return false;
+  return true;
+}

Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h?rev=205925&r1=205924&r2=205925&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h Wed Apr  9 15:43:40 2014
@@ -277,6 +277,10 @@ public:
   SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
 
   bool isLegalICmpImmediate(int64_t Val) const;
+
+  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
+  bool isTruncateFree(EVT VT1, EVT VT2) const override;
+
   SDValue getSelectableIntSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                          SDValue &A64cc, SelectionDAG &DAG, SDLoc &dl) const;
 

Added: llvm/trunk/test/CodeGen/AArch64/eliminate-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/eliminate-trunc.ll?rev=205925&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/eliminate-trunc.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/eliminate-trunc.ll Wed Apr  9 15:43:40 2014
@@ -0,0 +1,38 @@
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+
+; Check that the trunc i64 operation is translated as a subregister access,
+; eliminating an i32 induction variable.
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #1
+; CHECK-NOT: add {{w[0-9]+}}, {{w[0-9]+}}, #1
+; CHECK-NEXT: cmp {{w[0-9]+}}, {{w[0-9]+}}, uxtw
+define void @test1_signed([8 x i8]* nocapture %a, i8* nocapture readonly %box, i8 %limit) {
+entry:
+  %conv = zext i8 %limit to i32
+  %cmp223 = icmp eq i8 %limit, 0
+  br i1 %cmp223, label %for.end15, label %for.body4.lr.ph.us
+
+for.body4.us:
+  %indvars.iv = phi i64 [ 0, %for.body4.lr.ph.us ], [ %indvars.iv.next, %for.body4.us ]
+  %arrayidx6.us = getelementptr inbounds [8 x i8]* %a, i64 %indvars.iv26, i64 %indvars.iv
+  %0 = load i8* %arrayidx6.us, align 1
+  %idxprom7.us = zext i8 %0 to i64
+  %arrayidx8.us = getelementptr inbounds i8* %box, i64 %idxprom7.us
+  %1 = load i8* %arrayidx8.us, align 1
+  store i8 %1, i8* %arrayidx6.us, align 1
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %2 = trunc i64 %indvars.iv.next to i32
+  %cmp2.us = icmp slt i32 %2, %conv
+  br i1 %cmp2.us, label %for.body4.us, label %for.cond1.for.inc13_crit_edge.us
+
+for.body4.lr.ph.us:
+  %indvars.iv26 = phi i64 [ %indvars.iv.next27, %for.cond1.for.inc13_crit_edge.us ], [ 0, %entry ]
+  br label %for.body4.us
+
+for.cond1.for.inc13_crit_edge.us:
+  %indvars.iv.next27 = add nuw nsw i64 %indvars.iv26, 1
+  %exitcond28 = icmp eq i64 %indvars.iv26, 3
+  br i1 %exitcond28, label %for.end15, label %for.body4.lr.ph.us
+
+for.end15:
+  ret void
+}





More information about the llvm-commits mailing list