[llvm] r226978 - [AArch64][LoadStoreOptimizer] Form LDPSW when possible.

Quentin Colombet qcolombet at apple.com
Fri Jan 23 17:25:54 PST 2015


Author: qcolombet
Date: Fri Jan 23 19:25:54 2015
New Revision: 226978

URL: http://llvm.org/viewvc/llvm-project?rev=226978&view=rev
Log:
[AArch64][LoadStoreOptimizer] Form LDPSW when possible.

This patch adds the missing LD[U]RSW variants to the load/store optimizer, so
that we generate a single LDPSW instead of two sign-extending loads when possible.

<rdar://problem/19583480>
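
For illustration (this example is mine, not part of the commit), here is a
small C++ function whose two sign-extending 32-bit loads become pairable with
this change. The before/after assembly is a sketch of what the AArch64 backend
would be expected to emit; exact register numbers may differ.

    // Hypothetical example: two adjacent i32 loads, each sign-extended
    // to 64 bits before the add.
    #include <cstdint>
    int64_t sum2(const int32_t *p) {
      return static_cast<int64_t>(p[0]) + static_cast<int64_t>(p[1]);
    }
    // Before this patch (two separate sign-extending loads):
    //   ldrsw   x8, [x0]
    //   ldrsw   x9, [x0, #4]
    //   add     x0, x9, x8
    //   ret
    // After this patch (loads merged into one load-pair):
    //   ldpsw   x8, x9, [x0]
    //   add     x0, x9, x8
    //   ret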

Modified:
    llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
    llvm/trunk/test/CodeGen/AArch64/arm64-ldp.ll

Modified: llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp?rev=226978&r1=226977&r2=226978&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp Fri Jan 23 19:25:54 2015
@@ -135,6 +135,8 @@ static bool isUnscaledLdst(unsigned Opc)
     return true;
   case AArch64::LDURXi:
     return true;
+  case AArch64::LDURSWi:
+    return true;
   }
 }
 
@@ -173,6 +175,9 @@ int AArch64LoadStoreOpt::getMemSize(Mach
   case AArch64::LDRXui:
   case AArch64::LDURXi:
     return 8;
+  case AArch64::LDRSWui:
+  case AArch64::LDURSWi:
+    return 4;
   }
 }
 
@@ -210,6 +215,9 @@ static unsigned getMatchingPairOpcode(un
   case AArch64::LDRXui:
   case AArch64::LDURXi:
     return AArch64::LDPXi;
+  case AArch64::LDRSWui:
+  case AArch64::LDURSWi:
+    return AArch64::LDPSWi;
   }
 }
 
@@ -237,6 +245,8 @@ static unsigned getPreIndexedOpcode(unsi
     return AArch64::LDRWpre;
   case AArch64::LDRXui:
     return AArch64::LDRXpre;
+  case AArch64::LDRSWui:
+    return AArch64::LDRSWpre;
   }
 }
 
@@ -264,6 +274,8 @@ static unsigned getPostIndexedOpcode(uns
     return AArch64::LDRWpost;
   case AArch64::LDRXui:
     return AArch64::LDRXpost;
+  case AArch64::LDRSWui:
+    return AArch64::LDRSWpost;
   }
 }
 
@@ -780,6 +792,7 @@ bool AArch64LoadStoreOpt::optimizeBlock(
     case AArch64::LDRQui:
     case AArch64::LDRXui:
     case AArch64::LDRWui:
+    case AArch64::LDRSWui:
     // do the unscaled versions as well
     case AArch64::STURSi:
     case AArch64::STURDi:
@@ -790,7 +803,8 @@ bool AArch64LoadStoreOpt::optimizeBlock(
     case AArch64::LDURDi:
     case AArch64::LDURQi:
     case AArch64::LDURWi:
-    case AArch64::LDURXi: {
+    case AArch64::LDURXi:
+    case AArch64::LDURSWi: {
       // If this is a volatile load/store, don't mess with it.
       if (MI->hasOrderedMemoryRef()) {
         ++MBBI;
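
To make the diff fragments above easier to read in isolation, here is a
reduced, self-contained C++ sketch (my reconstruction, not the verbatim LLVM
source) of the pairing-opcode lookup with the new sign-extending cases folded
in. The real getMatchingPairOpcode also handles the store and floating-point
variants visible elsewhere in the file.

    // Maps a candidate load opcode to the load-pair opcode the pass
    // rewrites it to; only the integer-load subset is sketched here.
    static unsigned getMatchingPairOpcode(unsigned Opc) {
      switch (Opc) {
      default:
        llvm_unreachable("Opcode has no pairwise equivalent!");
      case AArch64::LDRWui:
      case AArch64::LDURWi:
        return AArch64::LDPWi;
      case AArch64::LDRXui:
      case AArch64::LDURXi:
        return AArch64::LDPXi;
      // New in this patch: both the scaled (LDRSWui) and unscaled
      // (LDURSWi) sign-extending word loads pair into LDPSW.
      case AArch64::LDRSWui:
      case AArch64::LDURSWi:
        return AArch64::LDPSWi;
      }
    }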

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-ldp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-ldp.ll?rev=226978&r1=226977&r2=226978&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ldp.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ldp.ll Fri Jan 23 19:25:54 2015
@@ -12,6 +12,18 @@ define i32 @ldp_int(i32* %p) nounwind {
   ret i32 %add
 }
 
+; CHECK: ldp_sext_int
+; CHECK: ldpsw
+define i64 @ldp_sext_int(i32* %p) nounwind {
+  %tmp = load i32* %p, align 4
+  %add.ptr = getelementptr inbounds i32* %p, i64 1
+  %tmp1 = load i32* %add.ptr, align 4
+  %sexttmp = sext i32 %tmp to i64
+  %sexttmp1 = sext i32 %tmp1 to i64
+  %add = add nsw i64 %sexttmp1, %sexttmp
+  ret i64 %add
+}
+
 ; CHECK: ldp_long
 ; CHECK: ldp
 define i64 @ldp_long(i64* %p) nounwind {
@@ -56,6 +68,21 @@ define i32 @ldur_int(i32* %a) nounwind {
   ret i32 %tmp3
 }
 
+define i64 @ldur_sext_int(i32* %a) nounwind {
+; LDUR_CHK: ldur_sext_int
+; LDUR_CHK: ldpsw     [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-8]
+; LDUR_CHK-NEXT: add     x{{[0-9]+}}, [[DST2]], [[DST1]]
+; LDUR_CHK-NEXT: ret
+  %p1 = getelementptr inbounds i32* %a, i32 -1
+  %tmp1 = load i32* %p1, align 2
+  %p2 = getelementptr inbounds i32* %a, i32 -2
+  %tmp2 = load i32* %p2, align 2
+  %sexttmp1 = sext i32 %tmp1 to i64
+  %sexttmp2 = sext i32 %tmp2 to i64
+  %tmp3 = add i64 %sexttmp1, %sexttmp2
+  ret i64 %tmp3
+}
+
 define i64 @ldur_long(i64* %a) nounwind ssp {
 ; LDUR_CHK: ldur_long
 ; LDUR_CHK: ldp     [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-16]
@@ -110,6 +137,22 @@ define i64 @pairUpBarelyIn(i64* %a) noun
   ret i64 %tmp3
 }
 
+define i64 @pairUpBarelyInSext(i32* %a) nounwind ssp {
+; LDUR_CHK: pairUpBarelyInSext
+; LDUR_CHK-NOT: ldur
+; LDUR_CHK: ldpsw     [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-256]
+; LDUR_CHK-NEXT: add     x{{[0-9]+}}, [[DST2]], [[DST1]]
+; LDUR_CHK-NEXT: ret
+  %p1 = getelementptr inbounds i32* %a, i64 -63
+  %tmp1 = load i32* %p1, align 2
+  %p2 = getelementptr inbounds i32* %a, i64 -64
+  %tmp2 = load i32* %p2, align 2
+  %sexttmp1 = sext i32 %tmp1 to i64
+  %sexttmp2 = sext i32 %tmp2 to i64
+  %tmp3 = add i64 %sexttmp1, %sexttmp2
+  ret i64 %tmp3
+}
+
 define i64 @pairUpBarelyOut(i64* %a) nounwind ssp {
 ; LDUR_CHK: pairUpBarelyOut
 ; LDUR_CHK-NOT: ldp
@@ -125,6 +168,23 @@ define i64 @pairUpBarelyOut(i64* %a) nou
   ret i64 %tmp3
 }
 
+define i64 @pairUpBarelyOutSext(i32* %a) nounwind ssp {
+; LDUR_CHK: pairUpBarelyOutSext
+; LDUR_CHK-NOT: ldp
+; Don't be fragile about which loads or manipulations of the base register
+; are used---just check that there isn't an ldp before the add
+; LDUR_CHK: add
+; LDUR_CHK-NEXT: ret
+  %p1 = getelementptr inbounds i32* %a, i64 -64
+  %tmp1 = load i32* %p1, align 2
+  %p2 = getelementptr inbounds i32* %a, i64 -65
+  %tmp2 = load i32* %p2, align 2
+  %sexttmp1 = sext i32 %tmp1 to i64
+  %sexttmp2 = sext i32 %tmp2 to i64
+  %tmp3 = add i64 %sexttmp1, %sexttmp2
+  ret i64 %tmp3
+}
+
 define i64 @pairUpNotAligned(i64* %a) nounwind ssp {
 ; LDUR_CHK: pairUpNotAligned
 ; LDUR_CHK-NOT: ldp
@@ -147,3 +207,28 @@ define i64 @pairUpNotAligned(i64* %a) no
   %tmp3 = add i64 %tmp1, %tmp2
   ret i64 %tmp3
 }
+
+define i64 @pairUpNotAlignedSext(i32* %a) nounwind ssp {
+; LDUR_CHK: pairUpNotAlignedSext
+; LDUR_CHK-NOT: ldp
+; LDUR_CHK: ldursw
+; LDUR_CHK-NEXT: ldursw
+; LDUR_CHK-NEXT: add
+; LDUR_CHK-NEXT: ret
+  %p1 = getelementptr inbounds i32* %a, i64 -18
+  %bp1 = bitcast i32* %p1 to i8*
+  %bp1p1 = getelementptr inbounds i8* %bp1, i64 1
+  %dp1 = bitcast i8* %bp1p1 to i32*
+  %tmp1 = load i32* %dp1, align 1
+
+  %p2 = getelementptr inbounds i32* %a, i64 -17
+  %bp2 = bitcast i32* %p2 to i8*
+  %bp2p1 = getelementptr inbounds i8* %bp2, i64 1
+  %dp2 = bitcast i8* %bp2p1 to i32*
+  %tmp2 = load i32* %dp2, align 1
+
+  %sexttmp1 = sext i32 %tmp1 to i64
+  %sexttmp2 = sext i32 %tmp2 to i64
+  %tmp3 = add i64 %sexttmp1, %sexttmp2
+  ret i64 %tmp3
+}