[llvm] r253711 - [AArch64] Merge narrow zero stores to a wider store

Jun Bum Lim via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 20 13:14:08 PST 2015


Author: junbuml
Date: Fri Nov 20 15:14:07 2015
New Revision: 253711

URL: http://llvm.org/viewvc/llvm-project?rev=253711&view=rev
Log:
[AArch64] Merge narrow zero stores to a wider store

This change merges adjacent narrow zero stores into a single wider store.
For example:
  strh wzr, [x0]
  strh wzr, [x0, #2]
becomes
  str wzr, [x0]

This will fix PR25410.
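
As a rough source-level illustration (not part of this commit; the struct and
function names below are made up), C++ along these lines can end up as two
adjacent halfword zero stores that the pass may now merge, depending on what
earlier passes have already done:

  // Two adjacent 16-bit zero stores; after this patch the AArch64 load/store
  // optimizer can rewrite them as a single 32-bit store of wzr.
  struct Pair { unsigned short a, b; };

  void clearPair(Pair *p) {
    p->a = 0; // strh wzr, [x0]
    p->b = 0; // strh wzr, [x0, #2]  -> str wzr, [x0]
  }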

Added:
    llvm/trunk/test/CodeGen/AArch64/arm64-narrow-ldst-merge.ll
      - copied, changed from r253698, llvm/trunk/test/CodeGen/AArch64/arm64-ldr-merge.ll
Removed:
    llvm/trunk/test/CodeGen/AArch64/arm64-ldr-merge.ll
Modified:
    llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp

Modified: llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp?rev=253711&r1=253710&r2=253711&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp Fri Nov 20 15:14:07 2015
@@ -42,6 +42,7 @@ STATISTIC(NumPreFolded, "Number of pre-i
 STATISTIC(NumUnscaledPairCreated,
           "Number of load/store from unscaled generated");
 STATISTIC(NumNarrowLoadsPromoted, "Number of narrow loads promoted");
+STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
 
 static cl::opt<unsigned> ScanLimit("aarch64-load-store-scan-limit",
                                    cl::init(20), cl::Hidden);
@@ -152,6 +153,8 @@ static bool isUnscaledLdSt(unsigned Opc)
   case AArch64::STURSi:
   case AArch64::STURDi:
   case AArch64::STURQi:
+  case AArch64::STURBBi:
+  case AArch64::STURHHi:
   case AArch64::STURWi:
   case AArch64::STURXi:
   case AArch64::LDURSi:
@@ -189,6 +192,22 @@ static unsigned getBitExtrOpcode(Machine
   }
 }
 
+static bool isNarrowStore(unsigned Opc) {
+  switch (Opc) {
+  default:
+    return false;
+  case AArch64::STRBBui:
+  case AArch64::STURBBi:
+  case AArch64::STRHHui:
+  case AArch64::STURHHi:
+    return true;
+  }
+}
+
+static bool isNarrowStore(MachineInstr *MI) {
+  return isNarrowStore(MI->getOpcode());
+}
+
 static bool isNarrowLoad(unsigned Opc) {
   switch (Opc) {
   default:
@@ -219,12 +238,14 @@ static int getMemScale(MachineInstr *MI)
   case AArch64::LDRSBWui:
   case AArch64::LDURSBWi:
   case AArch64::STRBBui:
+  case AArch64::STURBBi:
     return 1;
   case AArch64::LDRHHui:
   case AArch64::LDURHHi:
   case AArch64::LDRSHWui:
   case AArch64::LDURSHWi:
   case AArch64::STRHHui:
+  case AArch64::STURHHi:
     return 2;
   case AArch64::LDRSui:
   case AArch64::LDURSi:
@@ -278,6 +299,10 @@ static unsigned getMatchingNonSExtOpcode
   case AArch64::STURDi:
   case AArch64::STRQui:
   case AArch64::STURQi:
+  case AArch64::STRBBui:
+  case AArch64::STURBBi:
+  case AArch64::STRHHui:
+  case AArch64::STURHHi:
   case AArch64::STRWui:
   case AArch64::STURWi:
   case AArch64::STRXui:
@@ -327,6 +352,14 @@ static unsigned getMatchingPairOpcode(un
   case AArch64::STRQui:
   case AArch64::STURQi:
     return AArch64::STPQi;
+  case AArch64::STRBBui:
+    return AArch64::STRHHui;
+  case AArch64::STRHHui:
+    return AArch64::STRWui;
+  case AArch64::STURBBi:
+    return AArch64::STURHHi;
+  case AArch64::STURHHi:
+    return AArch64::STURWi;
   case AArch64::STRWui:
   case AArch64::STURWi:
     return AArch64::STPWi;
@@ -681,17 +714,33 @@ AArch64LoadStoreOpt::mergePairedInsns(Ma
     return NextI;
   }
 
-  // Handle Unscaled
-  if (IsUnscaled)
-    OffsetImm /= OffsetStride;
-
   // Construct the new instruction.
-  MachineInstrBuilder MIB = BuildMI(*I->getParent(), InsertionPoint,
-                                    I->getDebugLoc(), TII->get(NewOpc))
-                                .addOperand(getLdStRegOp(RtMI))
-                                .addOperand(getLdStRegOp(Rt2MI))
-                                .addOperand(BaseRegOp)
-                                .addImm(OffsetImm);
+  MachineInstrBuilder MIB;
+  if (isNarrowStore(Opc)) {
+    // Change the scaled offset from small to large type.
+    if (!IsUnscaled) {
+      assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
+      OffsetImm /= 2;
+    }
+    MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
+                  TII->get(NewOpc))
+              .addOperand(getLdStRegOp(I))
+              .addOperand(BaseRegOp)
+              .addImm(OffsetImm);
+    // Copy MachineMemOperands from the original stores.
+    concatenateMemOperands(MIB, I, Paired);
+  } else {
+    // Handle Unscaled
+    if (IsUnscaled)
+      OffsetImm /= OffsetStride;
+    MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
+                  TII->get(NewOpc))
+              .addOperand(getLdStRegOp(RtMI))
+              .addOperand(getLdStRegOp(Rt2MI))
+              .addOperand(BaseRegOp)
+              .addImm(OffsetImm);
+  }
+
   (void)MIB;
 
   // FIXME: Do we need/want to copy the mem operands from the source
@@ -830,6 +879,11 @@ AArch64LoadStoreOpt::findMatchingInsn(Ma
   unsigned Reg = getLdStRegOp(FirstMI).getReg();
   unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
   int Offset = getLdStOffsetOp(FirstMI).getImm();
+  bool IsNarrowStore = isNarrowStore(Opc);
+
+  // For narrow stores, find only the case where the stored value is WZR.
+  if (IsNarrowStore && Reg != AArch64::WZR)
+    return E;
 
   // Early exit if the first instruction modifies the base register.
   // e.g., ldr x0, [x0]
@@ -840,7 +894,8 @@ AArch64LoadStoreOpt::findMatchingInsn(Ma
   // range, plus allow an extra one in case we find a later insn that matches
   // with Offset-1)
   int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
-  if (!isNarrowLoad(Opc) && !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
+  if (!(isNarrowLoad(Opc) || IsNarrowStore) &&
+      !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
     return E;
 
   // Track which registers have been modified and used between the first insn
@@ -907,9 +962,9 @@ AArch64LoadStoreOpt::findMatchingInsn(Ma
           continue;
         }
 
-        if (IsNarrowLoad) {
-          // If the alignment requirements of the larger type scaled load
-          // instruction can't express the scaled offset of the smaller type
+        if (IsNarrowLoad || IsNarrowStore) {
+          // If the alignment requirements of the scaled wide load/store
+          // instruction can't express the offset of the scaled narrow
           // input, bail and keep looking.
           if (!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) {
             trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
@@ -929,7 +984,10 @@ AArch64LoadStoreOpt::findMatchingInsn(Ma
         // If the destination register of the loads is the same register, bail
         // and keep looking. A load-pair instruction with both destination
         // registers the same is UNPREDICTABLE and will result in an exception.
-        if (MayLoad && Reg == getLdStRegOp(MI).getReg()) {
+        // For narrow stores, allow only when the stored value is the same
+        // (i.e., WZR).
+        if ((MayLoad && Reg == getLdStRegOp(MI).getReg()) ||
+            (IsNarrowStore && Reg != getLdStRegOp(MI).getReg())) {
           trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
           MemInsns.push_back(MI);
           continue;
@@ -1228,6 +1286,8 @@ bool AArch64LoadStoreOpt::tryToMergeLdSt
   if (Paired != E) {
     if (isNarrowLoad(MI)) {
       ++NumNarrowLoadsPromoted;
+    } else if (isNarrowStore(MI)) {
+      ++NumZeroStoresPromoted;
     } else {
       ++NumPairCreated;
       if (isUnscaledLdSt(MI))
@@ -1284,11 +1344,15 @@ bool AArch64LoadStoreOpt::optimizeBlock(
     case AArch64::LDRHHui:
     case AArch64::LDRSBWui:
     case AArch64::LDRSHWui:
+    case AArch64::STRBBui:
+    case AArch64::STRHHui:
     // Unscaled instructions.
     case AArch64::LDURBBi:
     case AArch64::LDURHHi:
     case AArch64::LDURSBWi:
-    case AArch64::LDURSHWi: {
+    case AArch64::LDURSHWi:
+    case AArch64::STURBBi:
+    case AArch64::STURHHi: {
       if (tryToMergeLdStInst(MBBI)) {
         Modified = true;
         break;
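
To summarize the new narrow-store path outside the diff context, here is a
small self-contained C++ sketch (simplified, with hypothetical names; not code
from the patch) of the widening rule expressed by getMatchingPairOpcode and
the OffsetImm adjustment in mergePairedInsns:

  #include <algorithm>
  #include <cassert>
  #include <cstdint>

  enum class Width { Byte = 1, Half = 2, Word = 4 };

  struct ZeroStore {
    Width W;      // access size of the store
    bool Scaled;  // true for STRBBui/STRHHui, false for STURBBi/STURHHi
    int64_t Off;  // immediate offset: units of W if Scaled, bytes otherwise
  };

  // Merge two adjacent zero stores of the same width into a single store of
  // twice the width, anchored at the lower of the two offsets.
  ZeroStore mergeZeroStores(const ZeroStore &A, const ZeroStore &B) {
    assert(A.W == B.W && A.Scaled == B.Scaled && "mismatched stores");
    assert(A.W != Width::Word && "only byte/halfword zero stores are widened");
    ZeroStore Wide = A;
    Wide.W = static_cast<Width>(static_cast<int>(A.W) * 2);
    int64_t MinOff = std::min(A.Off, B.Off);
    if (A.Scaled) {
      // The wider scaled opcode counts its immediate in units twice as large,
      // so the merged offset must be even and is halved (cf. OffsetImm /= 2
      // in the patch).
      assert((MinOff & 1) == 0 && "unexpected offset to merge");
      Wide.Off = MinOff / 2;
    } else {
      // Unscaled (STUR*) offsets are plain byte offsets and stay unchanged.
      Wide.Off = MinOff;
    }
    return Wide;
  }

Only stores of WZR are considered here because the single wider store writes
one value: two zero stores trivially combine into a wider zero store (still
WZR), which is why findMatchingInsn bails out for any other source register.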

Removed: llvm/trunk/test/CodeGen/AArch64/arm64-ldr-merge.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-ldr-merge.ll?rev=253710&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ldr-merge.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ldr-merge.ll (removed)
@@ -1,318 +0,0 @@
-; RUN: llc < %s -mtriple aarch64--none-eabi -mcpu=cortex-a57 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=LE
-; RUN: llc < %s -mtriple aarch64_be--none-eabi -mcpu=cortex-a57 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=BE
-
-; CHECK-LABEL: Ldrh_merge
-; CHECK-NOT: ldrh
-; CHECK: ldr [[NEW_DEST:w[0-9]+]]
-; CHECK-DAG: and [[LO_PART:w[0-9]+]], [[NEW_DEST]], #0xffff
-; CHECK-DAG: lsr [[HI_PART:w[0-9]+]], [[NEW_DEST]], #16
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i16 @Ldrh_merge(i16* nocapture readonly %p) {
-  %1 = load i16, i16* %p, align 2
-  %arrayidx2 = getelementptr inbounds i16, i16* %p, i64 1
-  %2 = load i16, i16* %arrayidx2, align 2
-  %add = sub nuw nsw i16 %1, %2
-  ret i16 %add
-}
-
-; CHECK-LABEL: Ldurh_merge
-; CHECK-NOT: ldurh
-; CHECK: ldur [[NEW_DEST:w[0-9]+]]
-; CHECK-DAG: and [[LO_PART:w[0-9]+]], [[NEW_DEST]], #0xffff
-; CHECK-DAG: lsr  [[HI_PART:w[0-9]+]], [[NEW_DEST]]
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i16 @Ldurh_merge(i16* nocapture readonly %p)  {
-entry:
-  %arrayidx = getelementptr inbounds i16, i16* %p, i64 -2
-  %0 = load i16, i16* %arrayidx
-  %arrayidx3 = getelementptr inbounds i16, i16* %p, i64 -1
-  %1 = load i16, i16* %arrayidx3
-  %add = sub nuw nsw i16 %0, %1
-  ret i16 %add
-}
-
-; CHECK-LABEL: Ldrh_4_merge
-; CHECK-NOT: ldrh
-; CHECK: ldp [[WORD1:w[0-9]+]], [[WORD2:w[0-9]+]], [x0]
-; CHECK-DAG: and [[WORD1LO:w[0-9]+]], [[WORD1]], #0xffff
-; CHECK-DAG: lsr [[WORD1HI:w[0-9]+]], [[WORD1]], #16
-; CHECK-DAG: and [[WORD2LO:w[0-9]+]], [[WORD2]], #0xffff
-; CHECK-DAG: lsr [[WORD2HI:w[0-9]+]], [[WORD2]], #16
-; LE-DAG: sub [[TEMP1:w[0-9]+]], [[WORD1HI]], [[WORD1LO]]
-; BE-DAG: sub [[TEMP1:w[0-9]+]], [[WORD1LO]], [[WORD1HI]]
-; LE: udiv [[TEMP2:w[0-9]+]], [[TEMP1]], [[WORD2LO]]
-; BE: udiv [[TEMP2:w[0-9]+]], [[TEMP1]], [[WORD2HI]]
-; LE: sub w0, [[TEMP2]], [[WORD2HI]]
-; BE: sub w0, [[TEMP2]], [[WORD2LO]]
-define i16 @Ldrh_4_merge(i16* nocapture readonly %P) {
-  %arrayidx = getelementptr inbounds i16, i16* %P, i64 0
-  %l0 = load i16, i16* %arrayidx
-  %arrayidx2 = getelementptr inbounds i16, i16* %P, i64 1
-  %l1 = load i16, i16* %arrayidx2
-  %arrayidx7 = getelementptr inbounds i16, i16* %P, i64 2
-  %l2 = load i16, i16* %arrayidx7
-  %arrayidx12 = getelementptr inbounds i16, i16* %P, i64 3
-  %l3 = load i16, i16* %arrayidx12
-  %add4 = sub nuw nsw i16 %l1, %l0
-  %add9 = udiv i16 %add4, %l2
-  %add14 = sub nuw nsw i16 %add9, %l3
-  ret i16 %add14
-}
-
-; CHECK-LABEL: Ldrsh_merge
-; CHECK: ldr [[NEW_DEST:w[0-9]+]]
-; CHECK-DAG: asr [[LO_PART:w[0-9]+]], [[NEW_DEST]], #16
-; CHECK-DAG: sxth [[HI_PART:w[0-9]+]], [[NEW_DEST]]
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-
-define i32 @Ldrsh_merge(i16* %p) nounwind {
-  %add.ptr0 = getelementptr inbounds i16, i16* %p, i64 4
-  %tmp = load i16, i16* %add.ptr0
-  %add.ptr = getelementptr inbounds i16, i16* %p, i64 5
-  %tmp1 = load i16, i16* %add.ptr
-  %sexttmp = sext i16 %tmp to i32
-  %sexttmp1 = sext i16 %tmp1 to i32
-  %add = sub nsw i32 %sexttmp1, %sexttmp
-  ret i32 %add
-}
-
-; CHECK-LABEL: Ldrsh_zsext_merge
-; CHECK: ldr [[NEW_DEST:w[0-9]+]]
-; LE-DAG: and [[LO_PART:w[0-9]+]], [[NEW_DEST]], #0xffff
-; LE-DAG: asr [[HI_PART:w[0-9]+]], [[NEW_DEST]], #16
-; BE-DAG: sxth [[LO_PART:w[0-9]+]], [[NEW_DEST]]
-; BE-DAG: lsr [[HI_PART:w[0-9]+]], [[NEW_DEST]], #16
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i32 @Ldrsh_zsext_merge(i16* %p) nounwind {
-  %add.ptr0 = getelementptr inbounds i16, i16* %p, i64 4
-  %tmp = load i16, i16* %add.ptr0
-  %add.ptr = getelementptr inbounds i16, i16* %p, i64 5
-  %tmp1 = load i16, i16* %add.ptr
-  %sexttmp = zext i16 %tmp to i32
-  %sexttmp1 = sext i16 %tmp1 to i32
-  %add = sub nsw i32 %sexttmp, %sexttmp1
-  ret i32 %add
-}
-
-; CHECK-LABEL: Ldrsh_szext_merge
-; CHECK: ldr [[NEW_DEST:w[0-9]+]]
-; LE-DAG: sxth [[LO_PART:w[0-9]+]], [[NEW_DEST]]
-; LE-DAG: lsr [[HI_PART:w[0-9]+]], [[NEW_DEST]], #16
-; BE-DAG: and [[LO_PART:w[0-9]+]], [[NEW_DEST]], #0xffff
-; BE-DAG: asr [[HI_PART:w[0-9]+]], [[NEW_DEST]], #16
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i32 @Ldrsh_szext_merge(i16* %p) nounwind {
-  %add.ptr0 = getelementptr inbounds i16, i16* %p, i64 4
-  %tmp = load i16, i16* %add.ptr0
-  %add.ptr = getelementptr inbounds i16, i16* %p, i64 5
-  %tmp1 = load i16, i16* %add.ptr
-  %sexttmp = sext i16 %tmp to i32
-  %sexttmp1 = zext i16 %tmp1 to i32
-  %add = sub nsw i32 %sexttmp, %sexttmp1
-  ret i32 %add
-}
-
-; CHECK-LABEL: Ldrb_merge
-; CHECK: ldrh [[NEW_DEST:w[0-9]+]]
-; CHECK-DAG: and [[LO_PART:w[0-9]+]], [[NEW_DEST]], #0xff
-; CHECK-DAG: ubfx [[HI_PART:w[0-9]+]], [[NEW_DEST]], #8, #8
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i32 @Ldrb_merge(i8* %p) nounwind {
-  %add.ptr0 = getelementptr inbounds i8, i8* %p, i64 2
-  %tmp = load i8, i8* %add.ptr0
-  %add.ptr = getelementptr inbounds i8, i8* %p, i64 3
-  %tmp1 = load i8, i8* %add.ptr
-  %sexttmp = zext i8 %tmp to i32
-  %sexttmp1 = zext i8 %tmp1 to i32
-  %add = sub nsw i32 %sexttmp, %sexttmp1
-  ret i32 %add
-}
-
-; CHECK-LABEL: Ldrsb_merge
-; CHECK: ldrh [[NEW_DEST:w[0-9]+]]
-; CHECK-DAG: sxtb [[LO_PART:w[0-9]+]], [[NEW_DEST]]
-; CHECK-DAG: sbfx [[HI_PART:w[0-9]+]], [[NEW_DEST]], #8, #8
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i32 @Ldrsb_merge(i8* %p) nounwind {
-  %add.ptr0 = getelementptr inbounds i8, i8* %p, i64 2
-  %tmp = load i8, i8* %add.ptr0
-  %add.ptr = getelementptr inbounds i8, i8* %p, i64 3
-  %tmp1 = load i8, i8* %add.ptr
-  %sexttmp = sext i8 %tmp to i32
-  %sexttmp1 = sext i8 %tmp1 to i32
-  %add = sub nsw i32 %sexttmp, %sexttmp1
-  ret i32 %add
-}
-
-; CHECK-LABEL: Ldrsb_zsext_merge
-; CHECK: ldrh [[NEW_DEST:w[0-9]+]]
-; LE-DAG: and [[LO_PART:w[0-9]+]], [[NEW_DEST]], #0xff
-; LE-DAG: sbfx [[HI_PART:w[0-9]+]], [[NEW_DEST]], #8, #8
-; BE-DAG: sxtb [[LO_PART:w[0-9]+]], [[NEW_DEST]]
-; BE-DAG: ubfx [[HI_PART:w[0-9]+]], [[NEW_DEST]], #8, #8
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i32 @Ldrsb_zsext_merge(i8* %p) nounwind {
-  %add.ptr0 = getelementptr inbounds i8, i8* %p, i64 2
-  %tmp = load i8, i8* %add.ptr0
-  %add.ptr = getelementptr inbounds i8, i8* %p, i64 3
-  %tmp1 = load i8, i8* %add.ptr
-  %sexttmp = zext i8 %tmp to i32
-  %sexttmp1 = sext i8 %tmp1 to i32
-  %add = sub nsw i32 %sexttmp, %sexttmp1
-  ret i32 %add
-}
-
-; CHECK-LABEL: Ldrsb_szext_merge
-; CHECK: ldrh [[NEW_DEST:w[0-9]+]]
-; LE-DAG: sxtb [[LO_PART:w[0-9]+]], [[NEW_DEST]]
-; LE-DAG: ubfx [[HI_PART:w[0-9]+]], [[NEW_DEST]], #8, #8
-; BE-DAG: and [[LO_PART:w[0-9]+]], [[NEW_DEST]], #0xff
-; BE-DAG: sbfx [[HI_PART:w[0-9]+]], [[NEW_DEST]], #8, #8
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i32 @Ldrsb_szext_merge(i8* %p) nounwind {
-  %add.ptr0 = getelementptr inbounds i8, i8* %p, i64 2
-  %tmp = load i8, i8* %add.ptr0
-  %add.ptr = getelementptr inbounds i8, i8* %p, i64 3
-  %tmp1 = load i8, i8* %add.ptr
-  %sexttmp = sext i8 %tmp to i32
-  %sexttmp1 = zext i8 %tmp1 to i32
-  %add = sub nsw i32 %sexttmp, %sexttmp1
-  ret i32 %add
-}
-
-; CHECK-LABEL: Ldursh_merge
-; CHECK: ldur [[NEW_DEST:w[0-9]+]]
-; CHECK-DAG: asr  [[LO_PART:w[0-9]+]], [[NEW_DEST]], #16
-; CHECK-DAG: sxth [[HI_PART:w[0-9]+]], [[NEW_DEST]]
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i32 @Ldursh_merge(i16* %p) nounwind {
-  %add.ptr0 = getelementptr inbounds i16, i16* %p, i64 -1
-  %tmp = load i16, i16* %add.ptr0
-  %add.ptr = getelementptr inbounds i16, i16* %p, i64 -2
-  %tmp1 = load i16, i16* %add.ptr
-  %sexttmp = sext i16 %tmp to i32
-  %sexttmp1 = sext i16 %tmp1 to i32
-  %add = sub nsw i32 %sexttmp, %sexttmp1
-  ret i32 %add
-}
-
-; CHECK-LABEL: Ldursh_zsext_merge
-; CHECK: ldur [[NEW_DEST:w[0-9]+]]
-; LE-DAG: lsr  [[LO_PART:w[0-9]+]], [[NEW_DEST]], #16
-; LE-DAG: sxth [[HI_PART:w[0-9]+]], [[NEW_DEST]]
-; BE-DAG: asr  [[LO_PART:w[0-9]+]], [[NEW_DEST]], #16
-; BE-DAG: and [[HI_PART:w[0-9]+]], [[NEW_DEST]], #0xffff
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i32 @Ldursh_zsext_merge(i16* %p) nounwind {
-  %add.ptr0 = getelementptr inbounds i16, i16* %p, i64 -1
-  %tmp = load i16, i16* %add.ptr0
-  %add.ptr = getelementptr inbounds i16, i16* %p, i64 -2
-  %tmp1 = load i16, i16* %add.ptr
-  %sexttmp = zext i16 %tmp to i32
-  %sexttmp1 = sext i16 %tmp1 to i32
-  %add = sub nsw i32 %sexttmp, %sexttmp1
-  ret i32 %add
-}
-
-; CHECK-LABEL: Ldursh_szext_merge
-; CHECK: ldur [[NEW_DEST:w[0-9]+]]
-; LE-DAG: asr  [[LO_PART:w[0-9]+]], [[NEW_DEST]], #16
-; LE-DAG: and [[HI_PART:w[0-9]+]], [[NEW_DEST]], #0xffff
-; BE-DAG: lsr  [[LO_PART:w[0-9]+]], [[NEW_DEST]], #16
-; BE-DAG: sxth [[HI_PART:w[0-9]+]], [[NEW_DEST]]
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i32 @Ldursh_szext_merge(i16* %p) nounwind {
-  %add.ptr0 = getelementptr inbounds i16, i16* %p, i64 -1
-  %tmp = load i16, i16* %add.ptr0
-  %add.ptr = getelementptr inbounds i16, i16* %p, i64 -2
-  %tmp1 = load i16, i16* %add.ptr
-  %sexttmp = sext i16 %tmp to i32
-  %sexttmp1 = zext i16 %tmp1 to i32
-  %add = sub nsw i32 %sexttmp, %sexttmp1
-  ret i32 %add
-}
-
-; CHECK-LABEL: Ldurb_merge
-; CHECK: ldurh [[NEW_DEST:w[0-9]+]]
-; CHECK-DAG: ubfx  [[LO_PART:w[0-9]+]], [[NEW_DEST]], #8, #8
-; CHECK-DAG: and [[HI_PART:w[0-9]+]], [[NEW_DEST]], #0xff
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i32 @Ldurb_merge(i8* %p) nounwind {
-  %add.ptr0 = getelementptr inbounds i8, i8* %p, i64 -1
-  %tmp = load i8, i8* %add.ptr0
-  %add.ptr = getelementptr inbounds i8, i8* %p, i64 -2
-  %tmp1 = load i8, i8* %add.ptr
-  %sexttmp = zext i8 %tmp to i32
-  %sexttmp1 = zext i8 %tmp1 to i32
-  %add = sub nsw i32 %sexttmp, %sexttmp1
-  ret i32 %add
-}
-
-; CHECK-LABEL: Ldursb_merge
-; CHECK: ldurh [[NEW_DEST:w[0-9]+]]
-; CHECK-DAG: sbfx [[LO_PART:w[0-9]+]], [[NEW_DEST]], #8, #8
-; CHECK-DAG: sxtb [[HI_PART:w[0-9]+]], [[NEW_DEST]]
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i32 @Ldursb_merge(i8* %p) nounwind {
-  %add.ptr0 = getelementptr inbounds i8, i8* %p, i64 -1
-  %tmp = load i8, i8* %add.ptr0
-  %add.ptr = getelementptr inbounds i8, i8* %p, i64 -2
-  %tmp1 = load i8, i8* %add.ptr
-  %sexttmp = sext i8 %tmp to i32
-  %sexttmp1 = sext i8 %tmp1 to i32
-  %add = sub nsw i32 %sexttmp, %sexttmp1
-  ret i32 %add
-}
-
-; CHECK-LABEL: Ldursb_zsext_merge
-; CHECK: ldurh [[NEW_DEST:w[0-9]+]]
-; LE-DAG: ubfx [[LO_PART:w[0-9]+]], [[NEW_DEST]], #8, #8
-; LE-DAG: sxtb [[HI_PART:w[0-9]+]], [[NEW_DEST]]
-; BE-DAG: sbfx [[LO_PART:w[0-9]+]], [[NEW_DEST]], #8, #8
-; BE-DAG: and [[HI_PART:w[0-9]+]], [[NEW_DEST]], #0xff
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i32 @Ldursb_zsext_merge(i8* %p) nounwind {
-  %add.ptr0 = getelementptr inbounds i8, i8* %p, i64 -1
-  %tmp = load i8, i8* %add.ptr0
-  %add.ptr = getelementptr inbounds i8, i8* %p, i64 -2
-  %tmp1 = load i8, i8* %add.ptr
-  %sexttmp = zext i8 %tmp to i32
-  %sexttmp1 = sext i8 %tmp1 to i32
-  %add = sub nsw i32 %sexttmp, %sexttmp1
-  ret i32 %add
-}
-
-; CHECK-LABEL: Ldursb_szext_merge
-; CHECK: ldurh [[NEW_DEST:w[0-9]+]]
-; LE-DAG: sbfx [[LO_PART:w[0-9]+]], [[NEW_DEST]], #8, #8
-; LE-DAG: and [[HI_PART:w[0-9]+]], [[NEW_DEST]], #0xff
-; BE-DAG: ubfx [[LO_PART:w[0-9]+]], [[NEW_DEST]], #8, #8
-; BE-DAG: sxtb [[HI_PART:w[0-9]+]], [[NEW_DEST]]
-; LE: sub {{w[0-9]+}}, [[LO_PART]], [[HI_PART]]
-; BE: sub {{w[0-9]+}}, [[HI_PART]], [[LO_PART]]
-define i32 @Ldursb_szext_merge(i8* %p) nounwind {
-  %add.ptr0 = getelementptr inbounds i8, i8* %p, i64 -1
-  %tmp = load i8, i8* %add.ptr0
-  %add.ptr = getelementptr inbounds i8, i8* %p, i64 -2
-  %tmp1 = load i8, i8* %add.ptr
-  %sexttmp = sext i8 %tmp to i32
-  %sexttmp1 = zext i8 %tmp1 to i32
-  %add = sub nsw i32 %sexttmp, %sexttmp1
-  ret i32 %add
-}
-

Copied: llvm/trunk/test/CodeGen/AArch64/arm64-narrow-ldst-merge.ll (from r253698, llvm/trunk/test/CodeGen/AArch64/arm64-ldr-merge.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-narrow-ldst-merge.ll?p2=llvm/trunk/test/CodeGen/AArch64/arm64-narrow-ldst-merge.ll&p1=llvm/trunk/test/CodeGen/AArch64/arm64-ldr-merge.ll&r1=253698&r2=253711&rev=253711&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ldr-merge.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-narrow-ldst-merge.ll Fri Nov 20 15:14:07 2015
@@ -316,3 +316,91 @@ define i32 @Ldursb_szext_merge(i8* %p) n
   ret i32 %add
 }
 
+; CHECK-LABEL: Strh_zero
+; CHECK: str wzr
+define void @Strh_zero(i16* nocapture %P, i32 %n) {
+entry:
+ %idxprom = sext i32 %n to i64
+  %arrayidx = getelementptr inbounds i16, i16* %P, i64 %idxprom
+ store i16 0, i16* %arrayidx
+  %add = add nsw i32 %n, 1
+  %idxprom1 = sext i32 %add to i64
+  %arrayidx2 = getelementptr inbounds i16, i16* %P, i64 %idxprom1
+  store i16 0, i16* %arrayidx2
+  ret void
+}
+
+; CHECK-LABEL: Strh_zero_4
+; CHECK: stp wzr, wzr
+define void @Strh_zero_4(i16* nocapture %P, i32 %n) {
+entry:
+  %idxprom = sext i32 %n to i64
+  %arrayidx = getelementptr inbounds i16, i16* %P, i64 %idxprom
+  store i16 0, i16* %arrayidx
+  %add = add nsw i32 %n, 1
+  %idxprom1 = sext i32 %add to i64
+  %arrayidx2 = getelementptr inbounds i16, i16* %P, i64 %idxprom1
+  store i16 0, i16* %arrayidx2
+  %add3 = add nsw i32 %n, 2
+  %idxprom4 = sext i32 %add3 to i64
+  %arrayidx5 = getelementptr inbounds i16, i16* %P, i64 %idxprom4
+  store i16 0, i16* %arrayidx5
+  %add6 = add nsw i32 %n, 3
+  %idxprom7 = sext i32 %add6 to i64
+  %arrayidx8 = getelementptr inbounds i16, i16* %P, i64 %idxprom7
+  store i16 0, i16* %arrayidx8
+  ret void
+}
+
+; CHECK-LABEL: Sturb_zero
+; CHECK: sturh wzr
+define void @Sturb_zero(i8* nocapture %P, i32 %n) #0 {
+entry:
+  %sub = add nsw i32 %n, -2
+  %idxprom = sext i32 %sub to i64
+  %arrayidx = getelementptr inbounds i8, i8* %P, i64 %idxprom
+  store i8 0, i8* %arrayidx
+  %sub2= add nsw i32 %n, -1
+  %idxprom1 = sext i32 %sub2 to i64
+  %arrayidx2 = getelementptr inbounds i8, i8* %P, i64 %idxprom1
+  store i8 0, i8* %arrayidx2
+  ret void
+}
+
+; CHECK-LABEL: Sturh_zero
+; CHECK: stur wzr
+define void @Sturh_zero(i16* nocapture %P, i32 %n) {
+entry:
+  %sub = add nsw i32 %n, -2
+  %idxprom = sext i32 %sub to i64
+  %arrayidx = getelementptr inbounds i16, i16* %P, i64 %idxprom
+  store i16 0, i16* %arrayidx
+  %sub1 = add nsw i32 %n, -3
+  %idxprom2 = sext i32 %sub1 to i64
+  %arrayidx3 = getelementptr inbounds i16, i16* %P, i64 %idxprom2
+  store i16 0, i16* %arrayidx3
+  ret void
+}
+
+; CHECK-LABEL: Sturh_zero_4
+; CHECK: stp wzr, wzr
+define void @Sturh_zero_4(i16* nocapture %P, i32 %n) {
+entry:
+  %sub = add nsw i32 %n, -3
+  %idxprom = sext i32 %sub to i64
+  %arrayidx = getelementptr inbounds i16, i16* %P, i64 %idxprom
+  store i16 0, i16* %arrayidx
+  %sub1 = add nsw i32 %n, -4
+  %idxprom2 = sext i32 %sub1 to i64
+  %arrayidx3 = getelementptr inbounds i16, i16* %P, i64 %idxprom2
+  store i16 0, i16* %arrayidx3
+  %sub4 = add nsw i32 %n, -2
+  %idxprom5 = sext i32 %sub4 to i64
+  %arrayidx6 = getelementptr inbounds i16, i16* %P, i64 %idxprom5
+  store i16 0, i16* %arrayidx6
+  %sub7 = add nsw i32 %n, -1
+  %idxprom8 = sext i32 %sub7 to i64
+  %arrayidx9 = getelementptr inbounds i16, i16* %P, i64 %idxprom8
+  store i16 0, i16* %arrayidx9
+  ret void
+}



