[clang] [llvm] [AArch64][SVE] Refactor getPTrue to return splat(1) when pattern=all. (PR #139236)

via llvm-commits llvm-commits at lists.llvm.org
Fri May 9 02:56:53 PDT 2025


llvmbot wrote:


@llvm/pr-subscribers-backend-aarch64

Author: Ricardo Jesus (rj-jesus)

Changes:

Similarly to #135016, refactor getPTrue to return splat(1) for all-active patterns. The main motivation for this patch is to improve code generation for fixed-length vector loads/stores that are converted to SVE masked memory ops when the vectors are wider than Neon. Emitting the mask as a splat helps DAGCombiner simplify all-active masked loads/stores into unmasked ones, for which it already has suitable combines.
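
For context, the generic simplification this unlocks looks roughly like the following (a simplified sketch in the spirit of DAGCombiner::visitMLOAD, not the exact upstream code; it omits the chain/CombineTo plumbing and the analogous masked-store case):

```cpp
// Simplified sketch of DAGCombiner's all-ones-mask combine. A target node
// such as AArch64ISD::PTRUE is opaque to this check, but a splat(1)
// constant mask is recognised generically, so emitting the all-active
// predicate as a splat lets the masked load become a plain load.
static SDValue simplifyAllActiveMaskedLoad(MaskedLoadSDNode *MLD,
                                           SelectionDAG &DAG) {
  if (ISD::isConstantSplatVectorAllOnes(MLD->getMask().getNode()) &&
      MLD->isUnindexed() && !MLD->isExpandingLoad() &&
      MLD->getExtensionType() == ISD::NON_EXTLOAD)
    return DAG.getLoad(MLD->getValueType(0), SDLoc(MLD), MLD->getChain(),
                       MLD->getBasePtr(), MLD->getMemOperand());
  return SDValue();
}
```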

There are four places in AArch64ISelLowering that match against AArch64ISD::PTRUE explicitly. Of these, only one (foldCSELofLASTB) led to test regressions, which I addressed by adding a check for ISD::isConstantSplatVectorAllOnes (I'm not sure if the original intent is to genuinely match any PTRUE node, or if isAllActivePredicate should be used instead). The other three combines (performUnpackCombine, performMSTORECombine and performSetCCPunpkCombine) check for patterns in the range [VL1, VL256], so I believe those should already skip all-active masks.
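
Spelled out, the updated condition in foldCSELofLASTB (visible in the diff below) is equivalent to the following; the helper is purely illustrative and not part of the patch:

```cpp
// Illustrative helper, not in the patch: the "true" predicate is accepted
// if it equals the LASTB predicate, is any AArch64ISD::PTRUE node (the
// pre-existing behaviour, regardless of pattern), or is the splat(1) form
// that getPTrue now produces for all-active predicates.
static bool isAcceptedTruePred(SDValue TruePred, SDValue AnyPred) {
  return TruePred == AnyPred ||
         TruePred.getOpcode() == AArch64ISD::PTRUE ||
         ISD::isConstantSplatVectorAllOnes(TruePred.getNode());
}
```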

Given the recent changes, going this route seemed more sensible than replicating the combines from DAGCombiner or adding patterns for all-active masked loads/stores, but I'm happy to pursue either of those approaches (or any other) if they are seen as more appropriate.

---

Patch is 51.26 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/139236.diff


21 Files Affected:

- (modified) clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c (+1-1) 
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+5-4) 
- (modified) llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll (+2-7) 
- (modified) llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll (+1-2) 
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll (+1-1) 
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll (+1-2) 
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-fp-convert.ll (+4-4) 
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll (+12-13) 
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll (+6-7) 
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll (+5-7) 
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-optimize-ptrue.ll (+19-24) 
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll (+18-26) 
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll (+77-101) 
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll (+5-5) 
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll (+10-10) 
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll (+12-18) 
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll (+1-2) 
- (modified) llvm/test/CodeGen/AArch64/sve-insert-vector.ll (+1-5) 
- (modified) llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll (+12-12) 
- (modified) llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll (+2-4) 
- (modified) llvm/test/CodeGen/AArch64/sve-vscale-attr.ll (+9-11) 


``````````diff
diff --git a/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c b/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c
index 1391a1b09fbd1..36c3c7f745a2b 100644
--- a/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c
+++ b/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c
@@ -16,7 +16,7 @@ void func(int *restrict a, int *restrict b) {
 // CHECK256-COUNT-8: str
 // CHECK512-COUNT-4: str
 // CHECK1024-COUNT-2: str
-// CHECK2048-COUNT-1: st1w
+// CHECK2048-COUNT-1: str
 #pragma clang loop vectorize(enable)
   for (int i = 0; i < 64; ++i)
     a[i] += b[i];
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 795ac68e63087..c49ad6e1a1e16 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5725,8 +5725,8 @@ SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
 
 static inline SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT,
                                int Pattern) {
-  if (VT == MVT::nxv1i1 && Pattern == AArch64SVEPredPattern::all)
-    return DAG.getConstant(1, DL, MVT::nxv1i1);
+  if (Pattern == AArch64SVEPredPattern::all)
+    return DAG.getConstant(1, DL, VT);
   return DAG.getNode(AArch64ISD::PTRUE, DL, VT,
                      DAG.getTargetConstant(Pattern, DL, MVT::i32));
 }
@@ -25030,7 +25030,8 @@ static SDValue foldCSELofLASTB(SDNode *Op, SelectionDAG &DAG) {
   if (AnyPred.getOpcode() == AArch64ISD::REINTERPRET_CAST)
     AnyPred = AnyPred.getOperand(0);
 
-  if (TruePred != AnyPred && TruePred.getOpcode() != AArch64ISD::PTRUE)
+  if (TruePred != AnyPred && TruePred.getOpcode() != AArch64ISD::PTRUE &&
+      !ISD::isConstantSplatVectorAllOnes(TruePred.getNode()))
     return SDValue();
 
   SDValue LastB = Op->getOperand(0);
@@ -28568,7 +28569,7 @@ static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT) {
   }
 }
 
-// Return a PTRUE with active lanes corresponding to the extent of VT.
+// Return a predicate with active lanes corresponding to the extent of VT.
 static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
                                                 EVT VT) {
   assert(VT.isFixedLengthVector() &&
diff --git a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
index d5b9d17a98d55..c3322ca38f9e5 100644
--- a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
@@ -208,13 +208,8 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_large_i32(ptr %
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ptrue p1.d, vl8
-; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    str z0, [sp]
-; CHECK-NEXT:    ld1w { z0.d }, p1/z, [x1]
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    ldr z0, [sp]
+; CHECK-NEXT:    ptrue p0.d, vl8
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x1]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
index 965af2a745afd..e10313773c73e 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
@@ -331,8 +331,7 @@ define void @extract_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec, ptr %p) nounwi
 ; CHECK-LABEL: extract_fixed_v4i64_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #32
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
   %retval = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> %vec, i64 4)
   store <4 x i64> %retval, ptr %p
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll b/llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll
index eaa99239b09e8..ac4c387b70583 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll
@@ -18,7 +18,7 @@ define void @st1d_fixed(ptr %ptr) #0 {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x20]
 ; CHECK-NEXT:    ldr x30, [sp, #128] // 8-byte Folded Reload
-; CHECK-NEXT:    st1d { z0.d }, p0, [x19]
+; CHECK-NEXT:    str z0, [x19]
 ; CHECK-NEXT:    ldp x20, x19, [sp, #144] // 16-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #160
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll
index 55f70b2ffc15b..00002dd3269a2 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll
@@ -544,11 +544,10 @@ define void @extract_subvector_v32f64(ptr %a, ptr %b) vscale_range(16,0) #0 {
 define void @extract_subvector_legalization_v8i32() vscale_range(2,2) #0 {
 ; CHECK-LABEL: extract_subvector_legalization_v8i32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    adrp x8, .LCPI40_0
 ; CHECK-NEXT:    add x8, x8, :lo12:.LCPI40_0
 ; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8]
+; CHECK-NEXT:    ldr z0, [x8]
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
 ; CHECK-NEXT:    cmeq v0.4s, v0.4s, #0
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-convert.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-convert.ll
index 25876f0ef44af..da1aa4cffe13a 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-convert.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-convert.ll
@@ -7,12 +7,12 @@ target triple = "aarch64-unknown-linux-gnu"
 define void @fp_convert_combine_crash(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fp_convert_combine_crash:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov z0.s, #8.00000000
+; CHECK-NEXT:    ldr z1, [x0]
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    fmov z1.s, #8.00000000
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT:    fmul z0.s, z0.s, z1.s
+; CHECK-NEXT:    fmul z0.s, z1.s, z0.s
 ; CHECK-NEXT:    fcvtzs z0.s, p0/m, z0.s
-; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    str z0, [x1]
 ; CHECK-NEXT:    ret
   %f = load <8 x float>, ptr %a
   %mul.i = fmul <8 x float> %f, <float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00,
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll
index 1bd688d23050b..72686c3f418e7 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll
@@ -20,32 +20,31 @@ define dso_local void @func1(ptr %v1, ptr %v2, ptr %v3, ptr %v4, ptr %v5, ptr %v
 ; CHECK-NEXT:    .cfi_offset w21, -24
 ; CHECK-NEXT:    .cfi_offset w22, -32
 ; CHECK-NEXT:    .cfi_offset w29, -48
-; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    add x10, sp, #176
 ; CHECK-NEXT:    add x8, sp, #48
 ; CHECK-NEXT:    add x9, sp, #144
-; CHECK-NEXT:    add x20, sp, #176
-; CHECK-NEXT:    ldr x15, [sp, #104]
-; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x10]
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
+; CHECK-NEXT:    ldr z3, [x10]
+; CHECK-NEXT:    ldr z0, [x8]
 ; CHECK-NEXT:    add x8, sp, #112
-; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x9]
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x8]
-; CHECK-NEXT:    ldur q4, [sp, #88]
+; CHECK-NEXT:    ldr z2, [x9]
+; CHECK-NEXT:    ldr z1, [x8]
+; CHECK-NEXT:    add x20, sp, #176
 ; CHECK-NEXT:    ldp x9, x8, [sp, #328]
-; CHECK-NEXT:    ldr x19, [sp, #272]
+; CHECK-NEXT:    ldr x15, [sp, #104]
 ; CHECK-NEXT:    ldp x11, x10, [sp, #312]
+; CHECK-NEXT:    ldur q4, [sp, #88]
 ; CHECK-NEXT:    ldp x13, x12, [sp, #296]
+; CHECK-NEXT:    ldr x19, [sp, #272]
 ; CHECK-NEXT:    ldp x18, x14, [sp, #280]
 ; CHECK-NEXT:    ldp x16, x17, [sp, #208]
 ; CHECK-NEXT:    ldp x21, x22, [sp, #352]
-; CHECK-NEXT:    st1d { z3.d }, p0, [x20]
+; CHECK-NEXT:    str z3, [x20]
 ; CHECK-NEXT:    add x20, sp, #144
-; CHECK-NEXT:    st1d { z2.d }, p0, [x20]
+; CHECK-NEXT:    str z2, [x20]
 ; CHECK-NEXT:    add x20, sp, #112
-; CHECK-NEXT:    st1d { z1.d }, p0, [x20]
+; CHECK-NEXT:    str z1, [x20]
 ; CHECK-NEXT:    add x20, sp, #48
-; CHECK-NEXT:    st1d { z0.d }, p0, [x20]
+; CHECK-NEXT:    str z0, [x20]
 ; CHECK-NEXT:    stp x21, x22, [sp, #352]
 ; CHECK-NEXT:    ldp x22, x21, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    stp x19, x18, [sp, #272]
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll
index 281ffff8e5eeb..2f76be61ae192 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -debug-only=isel < %s 2>&1 | FileCheck %s
 
 ; REQUIRES: asserts
@@ -9,16 +9,15 @@ target triple = "aarch64-unknown-linux-gnu"
 ; accessing fixed width objects.
 define void @foo(ptr %a) #0 {
 ; CHECK-LABEL: foo:
-; CHECK:       SelectionDAG has 15 nodes:
+; CHECK:       SelectionDAG has 13 nodes:
 ; CHECK-NEXT:    t0: ch,glue = EntryToken
-; CHECK-NEXT:    t12: nxv2i1 = PTRUE_D TargetConstant:i32<31>
 ; CHECK-NEXT:    t2: i64,ch = CopyFromReg t0, Register:i64 %0
-; CHECK-NEXT:    t18: nxv2i64,ch = LD1D_IMM<Mem:(volatile load (s512) from %ir.a)> t12, t2, TargetConstant:i64<0>, t0
+; CHECK-NEXT:    t21: nxv2i64,ch = LDR_ZXI<Mem:(volatile load (<vscale x 1 x s128>) from %ir.a, align 64)> t2, TargetConstant:i64<0>, t0
 ; CHECK-NEXT:    t8: i64 = ADDXri TargetFrameIndex:i64<1>, TargetConstant:i32<0>, TargetConstant:i32<0>
 ; CHECK-NEXT:    t6: i64 = ADDXri TargetFrameIndex:i64<0>, TargetConstant:i32<0>, TargetConstant:i32<0>
-; CHECK-NEXT:    t17: ch = ST1D_IMM<Mem:(volatile store (s512) into %ir.r0)> t18, t12, t6, TargetConstant:i64<0>, t18:1
-; CHECK-NEXT:    t16: ch = ST1D_IMM<Mem:(volatile store (s512) into %ir.r1)> t18, t12, t8, TargetConstant:i64<0>, t17
-; CHECK-NEXT:    t10: ch = RET_ReallyLR t16
+; CHECK-NEXT:    t22: ch = STR_ZXI<Mem:(volatile store (<vscale x 1 x s128>) into %ir.r0, align 64)> t21, t6, TargetConstant:i64<0>, t21:1
+; CHECK-NEXT:    t23: ch = STR_ZXI<Mem:(volatile store (<vscale x 1 x s128>) into %ir.r1, align 64)> t21, t8, TargetConstant:i64<0>, t22
+; CHECK-NEXT:    t10: ch = RET_ReallyLR t23
 ; CHECK-EMPTY:
 entry:
   %r0 = alloca <8 x i64>
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
index d7b67d73a671e..7b82c0af329f0 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
@@ -380,11 +380,10 @@ define void @v8i32(ptr %ldptr, ptr %stptr) {
 ;
 ; CHECK-256-LABEL: v8i32:
 ; CHECK-256:       // %bb.0:
-; CHECK-256-NEXT:    ptrue p0.s
-; CHECK-256-NEXT:    ld1w { z0.s }, p0/z, [x0, #2, mul vl]
-; CHECK-256-NEXT:    ld1w { z1.s }, p0/z, [x0, #1, mul vl]
-; CHECK-256-NEXT:    st1w { z0.s }, p0, [x1, #2, mul vl]
-; CHECK-256-NEXT:    st1w { z1.s }, p0, [x1, #1, mul vl]
+; CHECK-256-NEXT:    ldr z0, [x0, #2, mul vl]
+; CHECK-256-NEXT:    ldr z1, [x0, #1, mul vl]
+; CHECK-256-NEXT:    str z0, [x1, #2, mul vl]
+; CHECK-256-NEXT:    str z1, [x1, #1, mul vl]
 ; CHECK-256-NEXT:    ret
 ;
 ; CHECK-512-LABEL: v8i32:
@@ -437,8 +436,7 @@ define void @v8i32_vscale(ptr %0) {
 ; CHECK-256-LABEL: v8i32_vscale:
 ; CHECK-256:       // %bb.0:
 ; CHECK-256-NEXT:    mov z0.s, #1 // =0x1
-; CHECK-256-NEXT:    ptrue p0.s
-; CHECK-256-NEXT:    st1w { z0.s }, p0, [x0, #2, mul vl]
+; CHECK-256-NEXT:    str z0, [x0, #2, mul vl]
 ; CHECK-256-NEXT:    ret
 ;
 ; CHECK-512-LABEL: v8i32_vscale:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-optimize-ptrue.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-optimize-ptrue.ll
index 1512f5488bda4..d5aad7670cf7a 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-optimize-ptrue.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-optimize-ptrue.ll
@@ -6,11 +6,10 @@ target triple = "aarch64-unknown-linux-gnu"
 define void @add_v64i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: add_v64i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
-; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x1]
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x1]
 ; CHECK-NEXT:    add z0.b, z0.b, z1.b
-; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
+; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <64 x i8>, ptr %a
   %op2 = load <64 x i8>, ptr %b
@@ -22,11 +21,10 @@ define void @add_v64i8(ptr %a, ptr %b) #0 {
 define void @add_v32i16(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: add_v32i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
-; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x1]
 ; CHECK-NEXT:    add z0.h, z0.h, z1.h
-; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <32 x i16>, ptr %a
   %op2 = load <32 x i16>, ptr %b
@@ -38,10 +36,10 @@ define void @add_v32i16(ptr %a, ptr %b, ptr %c) #0 {
 define void @abs_v16i32(ptr %a) #0 {
 ; CHECK-LABEL: abs_v16i32:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr z0, [x0]
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    abs z0.s, p0/m, z0.s
-; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x i32>, ptr %a
   %res = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %op1, i1 false)
@@ -52,10 +50,10 @@ define void @abs_v16i32(ptr %a) #0 {
 define void @abs_v8i64(ptr %a) #0 {
 ; CHECK-LABEL: abs_v8i64:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr z0, [x0]
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    abs z0.d, p0/m, z0.d
-; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x i64>, ptr %a
   %res = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %op1, i1 false)
@@ -66,11 +64,10 @@ define void @abs_v8i64(ptr %a) #0 {
 define void @fadd_v32f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fadd_v32f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
-; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x1]
 ; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
-; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <32 x half>, ptr %a
   %op2 = load <32 x half>, ptr %b
@@ -82,11 +79,10 @@ define void @fadd_v32f16(ptr %a, ptr %b) #0 {
 define void @fadd_v16f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fadd_v16f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x1]
 ; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
-; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x float>, ptr %a
   %op2 = load <16 x float>, ptr %b
@@ -98,11 +94,10 @@ define void @fadd_v16f32(ptr %a, ptr %b) #0 {
 define void @fadd_v8f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fadd_v8f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x1]
 ; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
-; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x double>, ptr %a
   %op2 = load <8 x double>, ptr %b
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll
index 0d0b5cbc776c4..0cda4d94444e9 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll
@@ -194,13 +194,12 @@ define void @test_revhv32i16(ptr %a) #0 {
 define void @test_rev_elts_fail(ptr %a) #1 {
 ; CHECK-LABEL: test_rev_elts_fail:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    adrp x8, .LCPI11_0
 ; CHECK-NEXT:    add x8, x8, :lo12:.LCPI11_0
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x8]
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x8]
 ; CHECK-NEXT:    tbl z0.d, { z0.d }, z1.d
-; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
   %tmp1 = load <4 x i64>, ptr %a
   %tmp2 = shufflevector <4 x i64> %tmp1, <4 x i64> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
@@ -275,10 +274,9 @@ define void @test_revv8i32(ptr %a) #0 {
 define void @test_revv32i8_vl256(ptr %a) #1 {
 ; CHECK-LABEL: test_revv32i8_vl256:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT:    ldr z0, [x0]
 ; CHECK-NEXT:    rev z0.b, z0.b
-; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
+; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
   %tmp1 = load <32 x i8>, ptr %a
   %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> poison, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -290,10 +288,9 @@ define void @test_revv32i8_vl256(ptr %a) #1 {
 define void @test_revv16i16_vl256(ptr %a) #1 {
 ; CHECK-LABEL: test_revv16i16_vl256:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ldr z0, [x0]
 ; CHECK-NEXT:    rev z0.h, z0.h
-; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
   %tmp1 = load <16 x i16>, ptr %a
   %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> poison, <16 x i32> <i32 poison, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -305,10 +302,9 @@ define void @test_revv16i16_vl256(ptr %a) #1 {
 define void @test_revv8f32_vl256(ptr %a) #1 {
 ; CHECK-LABEL: test_revv8f32_vl256:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ldr z0, [x0]
 ; CHECK-NEXT:    rev z0.s, z0.s
-; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
   %tmp1 = load <8 x float>, ptr %a
   %tmp2 = shufflevector <8 x float> %tmp1, <8 x float> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -320,10 +316,9 @@ define void @test_revv8f32_vl256(ptr %a) #1 {
 define void @test_revv4f64_vl256(ptr %a) #1 {
 ; CHECK-LABEL: test_revv4f64_vl256:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ldr z0, [x0]
 ; CHECK-NEXT:    rev z0.d, z0.d
-; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
   %tmp1 = load <4 x double>, ptr %a
   %tmp2 = shufflevector <4 x double> %tmp1, <4 x double> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -335,10 +330,9 @@ define void @test_revv4f64_vl256(ptr %a) #1 {
 define void @test_revv8i32v8i32(ptr %a, ptr %b) #1 {
 ; CHECK-LABEL: test_revv8i32v8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; CHECK-NEXT:    ldr z0, [x1]
 ; CHECK-NEXT:    rev z0.s, z0.s
-; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
   %tmp1 = load <8 x i32>, ptr %a
   %tmp2 = load <8 x i32>, ptr %b
@@ -351,13 +345,12 @@ define void @test_revv8i32v8i32(ptr %a, ptr %b) #1 {
 define void @test_rev_fail(ptr %a) #1 {
 ; CHECK-LABEL: test_rev_fail:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    adrp x8, .LCPI20_0
 ...
[truncated]

``````````



https://github.com/llvm/llvm-project/pull/139236

