[llvm-branch-commits] [llvm] d77b786 - Revert "[AArch64][SVE] Pair SVE fill/spill into LDP/STP with -msve-vector-bit…"

via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Wed Apr 9 05:29:45 PDT 2025


Author: David Spickett
Date: 2025-04-09T13:29:42+01:00
New Revision: d77b786ac57008f734cd5552d59c6be34fd05c77

URL: https://github.com/llvm/llvm-project/commit/d77b786ac57008f734cd5552d59c6be34fd05c77
DIFF: https://github.com/llvm/llvm-project/commit/d77b786ac57008f734cd5552d59c6be34fd05c77.diff

LOG: Revert "[AArch64][SVE] Pair SVE fill/spill into LDP/STP with -msve-vector-bit…"

This reverts commit c80080ff7e105eb42d486ed473fa9c82fb518b0a.

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
    llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp

Removed: 
    llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 74217fad82a7e..d370f8c7ff6ea 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2760,9 +2760,6 @@ bool AArch64InstrInfo::isPairableLdStInst(const MachineInstr &MI) {
   case AArch64::LDRXpre:
   case AArch64::LDURSWi:
   case AArch64::LDRSWpre:
-  // SVE instructions.
-  case AArch64::LDR_ZXI:
-  case AArch64::STR_ZXI:
     return true;
   }
 }
@@ -2915,18 +2912,6 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
       return false;
   }
 
-  // Pairing SVE fills/spills is only valid for little-endian targets that
-  // implement VLS 128.
-  switch (MI.getOpcode()) {
-  default:
-    break;
-  case AArch64::LDR_ZXI:
-  case AArch64::STR_ZXI:
-    if (!Subtarget.isLittleEndian() ||
-        Subtarget.getSVEVectorSizeInBits() != 128)
-      return false;
-  }
-
   // Check if this load/store has a hint to avoid pair formation.
   // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
   if (isLdStPairSuppressed(MI))

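The guard removed above carries the whole legality argument for the reverted optimization: as its comment notes, pairing SVE fills/spills into LDP/STP is only valid for little-endian targets whose SVE vector length is fixed at 128 bits, since only then does each Z register occupy exactly the same 16 bytes as its overlapping Q register. A minimal standalone sketch of that gate, using stand-in types rather than LLVM's real Subtarget and opcode enums (the field layout and numeric details here are hypothetical):

    // Stand-in for the AArch64 subtarget queries used in the diff.
    struct Subtarget {
      bool LittleEndian;
      unsigned SVEBits; // fixed SVE vector length in bits, 0 if unknown
      bool isLittleEndian() const { return LittleEndian; }
      unsigned getSVEVectorSizeInBits() const { return SVEBits; }
    };

    enum Opcode { LDR_ZXI, STR_ZXI, OtherLdSt };

    // Mirrors the reverted check in isCandidateToMergeOrPair: SVE
    // fill/spill opcodes are pairable only under little-endian VLS 128.
    bool sveFillSpillPairable(Opcode Opc, const Subtarget &ST) {
      if (Opc != LDR_ZXI && Opc != STR_ZXI)
        return true; // the gate applies only to SVE fill/spill
      return ST.isLittleEndian() && ST.getSVEVectorSizeInBits() == 128;
    }

    int main() {
      Subtarget LE128{true, 128}, BE128{false, 128}, LE256{true, 256};
      return !(sveFillSpillPairable(STR_ZXI, LE128) &&
               !sveFillSpillPairable(STR_ZXI, BE128) &&
               !sveFillSpillPairable(LDR_ZXI, LE256));
    }
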
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 68ed68b9e32bc..06e633effe874 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -298,7 +298,6 @@ static unsigned getMatchingNonSExtOpcode(unsigned Opc,
   case AArch64::STRXui:
   case AArch64::STRXpre:
   case AArch64::STURXi:
-  case AArch64::STR_ZXI:
   case AArch64::LDRDui:
   case AArch64::LDURDi:
   case AArch64::LDRDpre:
@@ -317,7 +316,6 @@ static unsigned getMatchingNonSExtOpcode(unsigned Opc,
   case AArch64::LDRSui:
   case AArch64::LDURSi:
   case AArch64::LDRSpre:
-  case AArch64::LDR_ZXI:
     return Opc;
   case AArch64::LDRSWui:
     return AArch64::LDRWui;
@@ -363,7 +361,6 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
     return AArch64::STPDpre;
   case AArch64::STRQui:
   case AArch64::STURQi:
-  case AArch64::STR_ZXI:
     return AArch64::STPQi;
   case AArch64::STRQpre:
     return AArch64::STPQpre;
@@ -389,7 +386,6 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
     return AArch64::LDPDpre;
   case AArch64::LDRQui:
   case AArch64::LDURQi:
-  case AArch64::LDR_ZXI:
     return AArch64::LDPQi;
   case AArch64::LDRQpre:
     return AArch64::LDPQpre;
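The two hunks above drop STR_ZXI and LDR_ZXI from the pairing tables: the reverted patch had mapped an SVE spill to the same STPQi as a Q-register store, and an SVE fill to LDPQi, because under VLS 128 each register moves the same 16 bytes. A sketch of that table shape, with opcode names taken from the diff but a hypothetical stand-in enum rather than LLVM's real opcode numbering:

    #include <optional>

    enum Opcode { STRQui, STURQi, STR_ZXI, LDRQui, LDURQi, LDR_ZXI,
                  STPQi, LDPQi };

    // Shape of getMatchingPairOpcode as extended by the reverted patch:
    // SVE fill/spill opcodes shared the Q-register pair opcodes.
    std::optional<Opcode> matchingPairOpcode(Opcode Opc) {
      switch (Opc) {
      case STRQui: case STURQi: case STR_ZXI: return STPQi;
      case LDRQui: case LDURQi: case LDR_ZXI: return LDPQi;
      default: return std::nullopt;
      }
    }

    int main() { return matchingPairOpcode(STR_ZXI) == STPQi ? 0 : 1; }
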
@@ -1229,16 +1225,6 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
     (void)MIBSXTW;
     LLVM_DEBUG(dbgs() << "  Extend operand:\n    ");
     LLVM_DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
-  } else if (Opc == AArch64::LDR_ZXI || Opc == AArch64::STR_ZXI) {
-    // We are combining SVE fill/spill to LDP/STP, so we need to use the Q
-    // variant of the registers.
-    MachineOperand &MOp0 = MIB->getOperand(0);
-    MachineOperand &MOp1 = MIB->getOperand(1);
-    assert(AArch64::ZPRRegClass.contains(MOp0.getReg()) &&
-           AArch64::ZPRRegClass.contains(MOp1.getReg()) && "Invalid register.");
-    MOp0.setReg(AArch64::Q0 + (MOp0.getReg() - AArch64::Z0));
-    MOp1.setReg(AArch64::Q0 + (MOp1.getReg() - AArch64::Z0));
-    LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
   } else {
     LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
   }
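The else-if deleted above did the operand rewrite itself: once the pair is formed, its two register operands are renumbered from Zn to Qn, relying on each register file being laid out contiguously in LLVM's register enum so that Qn = Q0 + (Zn - Z0). A toy model of that index arithmetic (the enum base values below are made up; only the contiguity matters):

    #include <cassert>

    enum Reg : unsigned { Q0 = 100, Q1, Q2, Q3, Z0 = 200, Z1, Z2, Z3 };

    // At VL == 128, Zn and Qn name the same 16 bytes, so switching an
    // operand to the Q register class is pure index math, exactly as in
    // the removed hunk: Q0 + (Reg - Z0).
    Reg zToQ(Reg R) {
      assert(R >= Z0 && "expected a Z register");
      return static_cast<Reg>(Q0 + (R - Z0));
    }

    int main() {
      assert(zToQ(Z0) == Q0);
      assert(zToQ(Z3) == Q3);
      return 0;
    }
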
@@ -2673,8 +2659,7 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
       // Get the needed alignments to check them if
       // ldp-aligned-only/stp-aligned-only features are opted.
       uint64_t MemAlignment = MemOp->getAlign().value();
-      uint64_t TypeAlignment =
-          Align(MemOp->getSize().getValue().getKnownMinValue()).value();
+      uint64_t TypeAlignment = Align(MemOp->getSize().getValue()).value();
 
       if (MemAlignment < 2 * TypeAlignment) {
         NumFailedAlignmentCheck++;
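This hunk restores the fixed-size alignment computation. With SVE fills/spills in the mix, a candidate's memory size is scalable (vscale x 16 bytes), so the reverted patch took the known minimum, 16 bytes under the VLS-128 constraint, before the ldp-aligned-only/stp-aligned-only check that the access be aligned to twice the per-register size. A simplified sketch of that check with a stand-in for llvm::TypeSize (the real code additionally wraps the size in llvm::Align first):

    #include <cstdint>

    // Stand-in for llvm::TypeSize: a known-minimum byte count that may
    // be scaled by the runtime vscale.
    struct ScalableSize {
      uint64_t KnownMin;
      bool Scalable; // true for SVE fill/spill: vscale x KnownMin bytes
      uint64_t getKnownMinValue() const { return KnownMin; }
    };

    // Mirrors the guarded check in tryToPairLdStInst: a pair is only
    // formed when the access is aligned to twice the element size.
    bool alignmentAllowsPairing(uint64_t MemAlignment, ScalableSize Sz) {
      uint64_t TypeAlignment = Sz.getKnownMinValue();
      return MemAlignment >= 2 * TypeAlignment;
    }

    int main() {
      ScalableSize FillSpill{16, true}; // vscale x 16 bytes
      return !(alignmentAllowsPairing(32, FillSpill) &&
               !alignmentAllowsPairing(16, FillSpill));
    }
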
@@ -2835,18 +2820,11 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
     }
   // 3) Find loads and stores that can be merged into a single load or store
   //    pair instruction.
-  //    When compiling for SVE 128, also try to combine SVE fill/spill
-  //    instructions into LDP/STP.
   //      e.g.,
   //        ldr x0, [x2]
   //        ldr x1, [x2, #8]
   //        ; becomes
   //        ldp x0, x1, [x2]
-  //      e.g.,
-  //        ldr z0, [x2]
-  //        ldr z1, [x2, #1, mul vl]
-  //        ; becomes
-  //        ldp q0, q1, [x2]
 
   if (MBB.getParent()->getRegInfo().tracksLiveness()) {
     DefinedInBB.clear();

diff --git a/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
deleted file mode 100644
index 503ead4eba2db..0000000000000
--- a/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
+++ /dev/null
@@ -1,283 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s
-; RUN: llc -verify-machineinstrs -mtriple=aarch64_be-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-BE
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,ldp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-LDPALIGNEDONLY
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,stp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-STPALIGNEDONLY
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK-OFF
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=256 -aarch64-sve-vector-bits-max=256 < %s | FileCheck %s --check-prefixes=CHECK-OFF
-
-define void @nxv16i8(ptr %ldptr, ptr %stptr) {
-; CHECK-LABEL: nxv16i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    stp q0, q1, [x1]
-; CHECK-NEXT:    ret
-;
-; CHECK-BE-LABEL: nxv16i8:
-; CHECK-BE:       // %bb.0:
-; CHECK-BE-NEXT:    ptrue p0.b
-; CHECK-BE-NEXT:    ld1b { z0.b }, p0/z, [x0]
-; CHECK-BE-NEXT:    ld1b { z1.b }, p0/z, [x0, #1, mul vl]
-; CHECK-BE-NEXT:    st1b { z0.b }, p0, [x1]
-; CHECK-BE-NEXT:    st1b { z1.b }, p0, [x1, #1, mul vl]
-; CHECK-BE-NEXT:    ret
-;
-; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8:
-; CHECK-LDPALIGNEDONLY:       // %bb.0:
-; CHECK-LDPALIGNEDONLY-NEXT:    ldr z0, [x0]
-; CHECK-LDPALIGNEDONLY-NEXT:    ldr z1, [x0, #1, mul vl]
-; CHECK-LDPALIGNEDONLY-NEXT:    stp q0, q1, [x1]
-; CHECK-LDPALIGNEDONLY-NEXT:    ret
-;
-; CHECK-STPALIGNEDONLY-LABEL: nxv16i8:
-; CHECK-STPALIGNEDONLY:       // %bb.0:
-; CHECK-STPALIGNEDONLY-NEXT:    ldp q0, q1, [x0]
-; CHECK-STPALIGNEDONLY-NEXT:    str z0, [x1]
-; CHECK-STPALIGNEDONLY-NEXT:    str z1, [x1, #1, mul vl]
-; CHECK-STPALIGNEDONLY-NEXT:    ret
-;
-; CHECK-OFF-LABEL: nxv16i8:
-; CHECK-OFF:       // %bb.0:
-; CHECK-OFF-NEXT:    ldr z0, [x0]
-; CHECK-OFF-NEXT:    ldr z1, [x0, #1, mul vl]
-; CHECK-OFF-NEXT:    str z0, [x1]
-; CHECK-OFF-NEXT:    str z1, [x1, #1, mul vl]
-; CHECK-OFF-NEXT:    ret
-  %vscale = tail call i64 @llvm.vscale()
-  %vl = shl nuw nsw i64 %vscale, 4
-  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
-  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
-  %ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
-  %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
-  store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
-  store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
-  ret void
-}
-
-define void @nxv16i8_max_range(ptr %ldptr, ptr %stptr) {
-; CHECK-LABEL: nxv16i8_max_range:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q0, q1, [x0, #-1024]
-; CHECK-NEXT:    stp q0, q1, [x1, #1008]
-; CHECK-NEXT:    ret
-;
-; CHECK-BE-LABEL: nxv16i8_max_range:
-; CHECK-BE:       // %bb.0:
-; CHECK-BE-NEXT:    rdvl x8, #1
-; CHECK-BE-NEXT:    mov x9, #-1008 // =0xfffffffffffffc10
-; CHECK-BE-NEXT:    mov x10, #-1024 // =0xfffffffffffffc00
-; CHECK-BE-NEXT:    lsr x8, x8, #4
-; CHECK-BE-NEXT:    mov w11, #1008 // =0x3f0
-; CHECK-BE-NEXT:    mov w12, #1024 // =0x400
-; CHECK-BE-NEXT:    ptrue p0.b
-; CHECK-BE-NEXT:    mul x9, x8, x9
-; CHECK-BE-NEXT:    mul x10, x8, x10
-; CHECK-BE-NEXT:    mul x11, x8, x11
-; CHECK-BE-NEXT:    ld1b { z1.b }, p0/z, [x0, x9]
-; CHECK-BE-NEXT:    mul x8, x8, x12
-; CHECK-BE-NEXT:    ld1b { z0.b }, p0/z, [x0, x10]
-; CHECK-BE-NEXT:    st1b { z0.b }, p0, [x1, x11]
-; CHECK-BE-NEXT:    st1b { z1.b }, p0, [x1, x8]
-; CHECK-BE-NEXT:    ret
-;
-; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_max_range:
-; CHECK-LDPALIGNEDONLY:       // %bb.0:
-; CHECK-LDPALIGNEDONLY-NEXT:    ldr z0, [x0, #-64, mul vl]
-; CHECK-LDPALIGNEDONLY-NEXT:    ldr z1, [x0, #-63, mul vl]
-; CHECK-LDPALIGNEDONLY-NEXT:    stp q0, q1, [x1, #1008]
-; CHECK-LDPALIGNEDONLY-NEXT:    ret
-;
-; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_max_range:
-; CHECK-STPALIGNEDONLY:       // %bb.0:
-; CHECK-STPALIGNEDONLY-NEXT:    ldp q0, q1, [x0, #-1024]
-; CHECK-STPALIGNEDONLY-NEXT:    str z0, [x1, #63, mul vl]
-; CHECK-STPALIGNEDONLY-NEXT:    str z1, [x1, #64, mul vl]
-; CHECK-STPALIGNEDONLY-NEXT:    ret
-;
-; CHECK-OFF-LABEL: nxv16i8_max_range:
-; CHECK-OFF:       // %bb.0:
-; CHECK-OFF-NEXT:    ldr z0, [x0, #-64, mul vl]
-; CHECK-OFF-NEXT:    ldr z1, [x0, #-63, mul vl]
-; CHECK-OFF-NEXT:    str z0, [x1, #63, mul vl]
-; CHECK-OFF-NEXT:    str z1, [x1, #64, mul vl]
-; CHECK-OFF-NEXT:    ret
-  %vscale = tail call i64 @llvm.vscale()
-  %ldoff1 = mul i64 %vscale, -1024
-  %ldoff2 = mul i64 %vscale, -1008
-  %stoff1 = mul i64 %vscale, 1008
-  %stoff2 = mul i64 %vscale, 1024
-  %ldptr1 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff1
-  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff2
-  %stptr1 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff1
-  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff2
-  %ld1 = load <vscale x 16 x i8>, ptr %ldptr1, align 1
-  %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
-  store <vscale x 16 x i8> %ld1, ptr %stptr1, align 1
-  store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
-  ret void
-}
-
-define void @nxv16i8_outside_range(ptr %ldptr, ptr %stptr) {
-; CHECK-LABEL: nxv16i8_outside_range:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x0, #-65, mul vl]
-; CHECK-NEXT:    ldr z1, [x0, #-64, mul vl]
-; CHECK-NEXT:    str z0, [x1, #64, mul vl]
-; CHECK-NEXT:    str z1, [x1, #65, mul vl]
-; CHECK-NEXT:    ret
-;
-; CHECK-BE-LABEL: nxv16i8_outside_range:
-; CHECK-BE:       // %bb.0:
-; CHECK-BE-NEXT:    rdvl x8, #1
-; CHECK-BE-NEXT:    mov x9, #-1040 // =0xfffffffffffffbf0
-; CHECK-BE-NEXT:    mov x10, #-1024 // =0xfffffffffffffc00
-; CHECK-BE-NEXT:    lsr x8, x8, #4
-; CHECK-BE-NEXT:    mov w11, #1024 // =0x400
-; CHECK-BE-NEXT:    mov w12, #1040 // =0x410
-; CHECK-BE-NEXT:    ptrue p0.b
-; CHECK-BE-NEXT:    mul x9, x8, x9
-; CHECK-BE-NEXT:    mul x10, x8, x10
-; CHECK-BE-NEXT:    mul x11, x8, x11
-; CHECK-BE-NEXT:    ld1b { z0.b }, p0/z, [x0, x9]
-; CHECK-BE-NEXT:    mul x8, x8, x12
-; CHECK-BE-NEXT:    ld1b { z1.b }, p0/z, [x0, x10]
-; CHECK-BE-NEXT:    st1b { z0.b }, p0, [x1, x11]
-; CHECK-BE-NEXT:    st1b { z1.b }, p0, [x1, x8]
-; CHECK-BE-NEXT:    ret
-;
-; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_outside_range:
-; CHECK-LDPALIGNEDONLY:       // %bb.0:
-; CHECK-LDPALIGNEDONLY-NEXT:    ldr z0, [x0, #-65, mul vl]
-; CHECK-LDPALIGNEDONLY-NEXT:    ldr z1, [x0, #-64, mul vl]
-; CHECK-LDPALIGNEDONLY-NEXT:    str z0, [x1, #64, mul vl]
-; CHECK-LDPALIGNEDONLY-NEXT:    str z1, [x1, #65, mul vl]
-; CHECK-LDPALIGNEDONLY-NEXT:    ret
-;
-; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_outside_range:
-; CHECK-STPALIGNEDONLY:       // %bb.0:
-; CHECK-STPALIGNEDONLY-NEXT:    ldr z0, [x0, #-65, mul vl]
-; CHECK-STPALIGNEDONLY-NEXT:    ldr z1, [x0, #-64, mul vl]
-; CHECK-STPALIGNEDONLY-NEXT:    str z0, [x1, #64, mul vl]
-; CHECK-STPALIGNEDONLY-NEXT:    str z1, [x1, #65, mul vl]
-; CHECK-STPALIGNEDONLY-NEXT:    ret
-;
-; CHECK-OFF-LABEL: nxv16i8_outside_range:
-; CHECK-OFF:       // %bb.0:
-; CHECK-OFF-NEXT:    ldr z0, [x0, #-65, mul vl]
-; CHECK-OFF-NEXT:    ldr z1, [x0, #-64, mul vl]
-; CHECK-OFF-NEXT:    str z0, [x1, #64, mul vl]
-; CHECK-OFF-NEXT:    str z1, [x1, #65, mul vl]
-; CHECK-OFF-NEXT:    ret
-  %vscale = tail call i64 @llvm.vscale()
-  %ldoff1 = mul i64 %vscale, -1040
-  %ldoff2 = mul i64 %vscale, -1024
-  %stoff1 = mul i64 %vscale, 1024
-  %stoff2 = mul i64 %vscale, 1040
-  %ldptr1 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff1
-  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff2
-  %stptr1 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff1
-  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff2
-  %ld1 = load <vscale x 16 x i8>, ptr %ldptr1, align 1
-  %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
-  store <vscale x 16 x i8> %ld1, ptr %stptr1, align 1
-  store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
-  ret void
-}
-
-define void @nxv16i8_2vl_stride(ptr %ldptr, ptr %stptr) {
-; CHECK-LABEL: nxv16i8_2vl_stride:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    ldr z1, [x0, #2, mul vl]
-; CHECK-NEXT:    str z0, [x1]
-; CHECK-NEXT:    str z1, [x1, #2, mul vl]
-; CHECK-NEXT:    ret
-;
-; CHECK-BE-LABEL: nxv16i8_2vl_stride:
-; CHECK-BE:       // %bb.0:
-; CHECK-BE-NEXT:    ptrue p0.b
-; CHECK-BE-NEXT:    ld1b { z0.b }, p0/z, [x0]
-; CHECK-BE-NEXT:    ld1b { z1.b }, p0/z, [x0, #2, mul vl]
-; CHECK-BE-NEXT:    st1b { z0.b }, p0, [x1]
-; CHECK-BE-NEXT:    st1b { z1.b }, p0, [x1, #2, mul vl]
-; CHECK-BE-NEXT:    ret
-;
-; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
-; CHECK-LDPALIGNEDONLY:       // %bb.0:
-; CHECK-LDPALIGNEDONLY-NEXT:    ldr z0, [x0]
-; CHECK-LDPALIGNEDONLY-NEXT:    ldr z1, [x0, #2, mul vl]
-; CHECK-LDPALIGNEDONLY-NEXT:    str z0, [x1]
-; CHECK-LDPALIGNEDONLY-NEXT:    str z1, [x1, #2, mul vl]
-; CHECK-LDPALIGNEDONLY-NEXT:    ret
-;
-; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
-; CHECK-STPALIGNEDONLY:       // %bb.0:
-; CHECK-STPALIGNEDONLY-NEXT:    ldr z0, [x0]
-; CHECK-STPALIGNEDONLY-NEXT:    ldr z1, [x0, #2, mul vl]
-; CHECK-STPALIGNEDONLY-NEXT:    str z0, [x1]
-; CHECK-STPALIGNEDONLY-NEXT:    str z1, [x1, #2, mul vl]
-; CHECK-STPALIGNEDONLY-NEXT:    ret
-;
-; CHECK-OFF-LABEL: nxv16i8_2vl_stride:
-; CHECK-OFF:       // %bb.0:
-; CHECK-OFF-NEXT:    ldr z0, [x0]
-; CHECK-OFF-NEXT:    ldr z1, [x0, #2, mul vl]
-; CHECK-OFF-NEXT:    str z0, [x1]
-; CHECK-OFF-NEXT:    str z1, [x1, #2, mul vl]
-; CHECK-OFF-NEXT:    ret
-  %vscale = tail call i64 @llvm.vscale()
-  %vl = shl nuw nsw i64 %vscale, 5
-  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
-  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
-  %ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
-  %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
-  store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
-  store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
-  ret void
-}
-
-define void @nxv2f64_32b_aligned(ptr %ldptr, ptr %stptr) {
-; CHECK-LABEL: nxv2f64_32b_aligned:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    stp q0, q1, [x1]
-; CHECK-NEXT:    ret
-;
-; CHECK-BE-LABEL: nxv2f64_32b_aligned:
-; CHECK-BE:       // %bb.0:
-; CHECK-BE-NEXT:    ptrue p0.d
-; CHECK-BE-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK-BE-NEXT:    ld1d { z1.d }, p0/z, [x0, #1, mul vl]
-; CHECK-BE-NEXT:    st1d { z0.d }, p0, [x1]
-; CHECK-BE-NEXT:    st1d { z1.d }, p0, [x1, #1, mul vl]
-; CHECK-BE-NEXT:    ret
-;
-; CHECK-LDPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
-; CHECK-LDPALIGNEDONLY:       // %bb.0:
-; CHECK-LDPALIGNEDONLY-NEXT:    ldp q0, q1, [x0]
-; CHECK-LDPALIGNEDONLY-NEXT:    stp q0, q1, [x1]
-; CHECK-LDPALIGNEDONLY-NEXT:    ret
-;
-; CHECK-STPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
-; CHECK-STPALIGNEDONLY:       // %bb.0:
-; CHECK-STPALIGNEDONLY-NEXT:    ldp q0, q1, [x0]
-; CHECK-STPALIGNEDONLY-NEXT:    stp q0, q1, [x1]
-; CHECK-STPALIGNEDONLY-NEXT:    ret
-;
-; CHECK-OFF-LABEL: nxv2f64_32b_aligned:
-; CHECK-OFF:       // %bb.0:
-; CHECK-OFF-NEXT:    ldr z0, [x0]
-; CHECK-OFF-NEXT:    ldr z1, [x0, #1, mul vl]
-; CHECK-OFF-NEXT:    str z0, [x1]
-; CHECK-OFF-NEXT:    str z1, [x1, #1, mul vl]
-; CHECK-OFF-NEXT:    ret
-  %vscale = tail call i64 @llvm.vscale()
-  %vl = shl nuw nsw i64 %vscale, 4
-  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
-  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
-  %ld1 = load <vscale x 2 x double>, ptr %ldptr, align 32
-  %ld2 = load <vscale x 2 x double>, ptr %ldptr2, align 32
-  store <vscale x 2 x double> %ld1, ptr %stptr, align 32
-  store <vscale x 2 x double> %ld2, ptr %stptr2, align 32
-  ret void
-}
