[llvm] [AArch64] Extend and rewrite load zero and load undef patterns (PR #108185)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 11 03:31:29 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-aarch64
Author: David Green (davemgreen)
Changes:
The scalar ldr instructions implicitly zero the upper lanes of the vector register, so insert(zerovec, load, 0) patterns can be selected to a single load. Likewise, insert(undef, load, 0) and scalar_to_vector(load) can reuse the scalar load, since the top bits are undef.
This patch makes sure there are patterns for each vector type and for each of the scaled, unscaled, roW and roX addressing modes.
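To make the intent concrete, here is a small hand-written sketch (not taken from the patch or its test files; the function names and the expected assembly are my assumptions) of the IR shapes these patterns target, in the style of llvm/test/CodeGen/AArch64/load-insert-zero.ll:

```llvm
; Illustrative only. insert(zerovec, load, 0): the scalar ldr already zeroes
; the upper lanes of the vector register, so no separate movi/zeroing is needed.
define <4 x i32> @load_insert_zero(ptr %p) {
  ; expected codegen (assumed): ldr s0, [x0]
  %l = load i32, ptr %p
  %v = insertelement <4 x i32> zeroinitializer, i32 %l, i64 0
  ret <4 x i32> %v
}

; insert(undef, load, 0) / scalar_to_vector(load): the upper lanes are undef,
; so the scalar load can be reused directly, here through the roX (register
; offset, 64-bit index) addressing mode.
define <4 x i32> @load_insert_undef_rox(ptr %p, i64 %idx) {
  ; expected codegen (assumed): ldr s0, [x0, x1, lsl #2]
  %a = getelementptr inbounds i32, ptr %p, i64 %idx
  %l = load i32, ptr %a
  %v = insertelement <4 x i32> undef, i32 %l, i64 0
  ret <4 x i32> %v
}
```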
---
Patch is 97.29 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/108185.diff
11 Files Affected:
- (modified) llvm/lib/Target/AArch64/AArch64InstrFormats.td (+5)
- (modified) llvm/lib/Target/AArch64/AArch64InstrInfo.td (+81-133)
- (modified) llvm/test/CodeGen/AArch64/build-vector-two-dup.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/complex-deinterleaving-uniform-cases.ll (+52-52)
- (modified) llvm/test/CodeGen/AArch64/extbinopload.ll (+152-151)
- (modified) llvm/test/CodeGen/AArch64/load-insert-undef.ll (+20-40)
- (modified) llvm/test/CodeGen/AArch64/load-insert-zero.ll (+8-16)
- (modified) llvm/test/CodeGen/AArch64/merge-scoped-aa-store.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/neon-dotreduce.ll (+513-513)
- (modified) llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll (+1-2)
- (modified) llvm/test/CodeGen/AArch64/trunc-to-tbl.ll (+3-3)
``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index 16002011aedfbe..46b462de5071cb 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -177,6 +177,11 @@ def dup_v4f32 :
[(v2f32 (extract_subvector (v4f32 (AArch64duplane32 (v4f32 node:$LHS), node:$RHS)), (i64 0))),
(v2f32 (AArch64duplane32 (v4f32 node:$LHS), node:$RHS))]>;
+// Match either a scalar_to_vector (from SDAG) or a vector_insert of undef (from GISel)
+def vec_ins_or_scal_vec : PatFrags<(ops node:$src),
+ [(vector_insert undef, node:$src, (i64 0)),
+ (scalar_to_vector node:$src)]>;
+
//===----------------------------------------------------------------------===//
// Asm Operand Classes.
//
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 9f7ffff0cbfa76..704401f3cfef74 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -3320,63 +3320,6 @@ defm LDRSW : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;
// Pre-fetch.
defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;
-def vec_ins_or_scal_vec : PatFrags<(ops node:$src),
- [(vector_insert undef, node:$src, (i64 0)),
- (scalar_to_vector node:$src)]>;
-
-// For regular load, we do not have any alignment requirement.
-// Thus, it is safe to directly map the vector loads with interesting
-// addressing modes.
-// FIXME: We could do the same for bitconvert to floating point vectors.
-multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
- ValueType ScalTy, ValueType VecTy,
- Instruction LOADW, Instruction LOADX,
- SubRegIndex sub> {
- def : Pat<(VecTy (vec_ins_or_scal_vec (ScalTy
- (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
- (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
- (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
- sub)>;
-
- def : Pat<(VecTy (vec_ins_or_scal_vec (ScalTy
- (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
- (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
- (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
- sub)>;
-}
-
-let AddedComplexity = 10 in {
-defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v8i8, LDRBroW, LDRBroX, bsub>;
-defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v16i8, LDRBroW, LDRBroX, bsub>;
-
-defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
-defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;
-
-defm : ScalToVecROLoadPat<ro16, load, i32, v4f16, LDRHroW, LDRHroX, hsub>;
-defm : ScalToVecROLoadPat<ro16, load, i32, v8f16, LDRHroW, LDRHroX, hsub>;
-
-defm : ScalToVecROLoadPat<ro32, load, i32, v2i32, LDRSroW, LDRSroX, ssub>;
-defm : ScalToVecROLoadPat<ro32, load, i32, v4i32, LDRSroW, LDRSroX, ssub>;
-
-defm : ScalToVecROLoadPat<ro32, load, f32, v2f32, LDRSroW, LDRSroX, ssub>;
-defm : ScalToVecROLoadPat<ro32, load, f32, v4f32, LDRSroW, LDRSroX, ssub>;
-
-defm : ScalToVecROLoadPat<ro64, load, i64, v2i64, LDRDroW, LDRDroX, dsub>;
-
-defm : ScalToVecROLoadPat<ro64, load, f64, v2f64, LDRDroW, LDRDroX, dsub>;
-
-
-def : Pat <(v1i64 (vec_ins_or_scal_vec (i64
- (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
- ro_Wextend64:$extend))))),
- (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
-
-def : Pat <(v1i64 (vec_ins_or_scal_vec (i64
- (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
- ro_Xextend64:$extend))))),
- (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
-}
-
// Match all load 64 bits width whose type is compatible with FPR64
multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
Instruction LOADW, Instruction LOADX> {
@@ -3500,42 +3443,6 @@ defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128Op, uimm12s16, "ldr",
def : Pat <(bf16 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
(LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
-// For regular load, we do not have any alignment requirement.
-// Thus, it is safe to directly map the vector loads with interesting
-// addressing modes.
-// FIXME: We could do the same for bitconvert to floating point vectors.
-def : Pat <(v8i8 (vec_ins_or_scal_vec (i32
- (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
- (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
- (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
-def : Pat <(v16i8 (vec_ins_or_scal_vec (i32
- (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
- (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
- (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
-def : Pat <(v4i16 (vec_ins_or_scal_vec (i32
- (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
- (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
- (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
-def : Pat <(v8i16 (vec_ins_or_scal_vec (i32
- (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
- (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
- (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
-def : Pat <(v2i32 (vec_ins_or_scal_vec (i32
- (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
- (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
- (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
-def : Pat <(v4i32 (vec_ins_or_scal_vec (i32
- (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
- (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
- (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
-def : Pat <(v1i64 (vec_ins_or_scal_vec (i64
- (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
- (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
-def : Pat <(v2i64 (vec_ins_or_scal_vec (i64
- (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
- (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
- (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;
-
// Match all load 64 bits width whose type is compatible with FPR64
let Predicates = [IsLE] in {
// We must use LD1 to perform vector loads in big-endian.
@@ -3901,12 +3808,13 @@ def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
(LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
-// A LDR will implicitly zero the rest of the vector, so vector_insert(zeros,
-// load, 0) can use a single load.
-multiclass LoadInsertZeroPatterns<SDPatternOperator LoadOp, ValueType VT, ValueType HVT, ValueType SVT,
- ValueType ScalarVT, Instruction LoadInst, Instruction UnscaledLoadInst,
- ComplexPattern Addr, ComplexPattern UnscaledAddr, Operand AddrImm,
- SubRegIndex SubReg> {
+// A LDR will implicitly zero the rest of the vector, so vector_insert(zeros, load, 0)
+// can use a single load. Same for scalar_to_vector(load) or insert(undef, load, 0).
+multiclass LoadInsertVTPatterns<SDPatternOperator LoadOp, ValueType VT, ValueType ScalarVT,
+ Instruction LoadInst, Instruction UnscaledLoadInst,
+ Instruction ROWLoadInst, Instruction ROXLoadInst,
+ ROAddrMode ro, ComplexPattern Addr, ComplexPattern UnscaledAddr,
+ Operand AddrImm, SubRegIndex SubReg> {
// Scaled
def : Pat <(vector_insert (VT immAllZerosV),
(ScalarVT (LoadOp (Addr GPR64sp:$Rn, AddrImm:$offset))), (i64 0)),
@@ -3915,42 +3823,82 @@ multiclass LoadInsertZeroPatterns<SDPatternOperator LoadOp, ValueType VT, ValueT
def : Pat <(vector_insert (VT immAllZerosV),
(ScalarVT (LoadOp (UnscaledAddr GPR64sp:$Rn, simm9:$offset))), (i64 0)),
(SUBREG_TO_REG (i64 0), (UnscaledLoadInst GPR64sp:$Rn, simm9:$offset), SubReg)>;
+ // roW
+ def : Pat <(vector_insert (VT immAllZerosV),
+ (ScalarVT (LoadOp (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))), (i64 0)),
+ (SUBREG_TO_REG (i64 0), (ROWLoadInst GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend), SubReg)>;
+ // roX
+ def : Pat <(vector_insert (VT immAllZerosV),
+ (ScalarVT (LoadOp (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))), (i64 0)),
+ (SUBREG_TO_REG (i64 0), (ROXLoadInst GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend), SubReg)>;
- // Half-vector patterns
- def : Pat <(vector_insert (HVT immAllZerosV),
- (ScalarVT (LoadOp (Addr GPR64sp:$Rn, AddrImm:$offset))), (i64 0)),
- (SUBREG_TO_REG (i64 0), (LoadInst GPR64sp:$Rn, AddrImm:$offset), SubReg)>;
- // Unscaled
- def : Pat <(vector_insert (HVT immAllZerosV),
- (ScalarVT (LoadOp (UnscaledAddr GPR64sp:$Rn, simm9:$offset))), (i64 0)),
- (SUBREG_TO_REG (i64 0), (UnscaledLoadInst GPR64sp:$Rn, simm9:$offset), SubReg)>;
-
- // SVE patterns
- def : Pat <(vector_insert (SVT immAllZerosV),
- (ScalarVT (LoadOp (Addr GPR64sp:$Rn, AddrImm:$offset))), (i64 0)),
- (SUBREG_TO_REG (i64 0), (LoadInst GPR64sp:$Rn, AddrImm:$offset), SubReg)>;
- // Unscaled
- def : Pat <(vector_insert (SVT immAllZerosV),
- (ScalarVT (LoadOp (UnscaledAddr GPR64sp:$Rn, simm9:$offset))), (i64 0)),
+ // Undef equivalents of the patterns above.
+ def : Pat <(VT (vec_ins_or_scal_vec
+ (ScalarVT (LoadOp (Addr GPR64sp:$Rn, AddrImm:$offset))))),
+ (SUBREG_TO_REG (i64 0), (LoadInst GPR64sp:$Rn, AddrImm:$offset), SubReg)>;
+ def : Pat <(VT (vec_ins_or_scal_vec
+ (ScalarVT (LoadOp (UnscaledAddr GPR64sp:$Rn, simm9:$offset))))),
(SUBREG_TO_REG (i64 0), (UnscaledLoadInst GPR64sp:$Rn, simm9:$offset), SubReg)>;
-}
-
-defm : LoadInsertZeroPatterns<extloadi8, v16i8, v8i8, nxv16i8, i32, LDRBui, LDURBi,
- am_indexed8, am_unscaled8, uimm12s1, bsub>;
-defm : LoadInsertZeroPatterns<extloadi16, v8i16, v4i16, nxv8i16, i32, LDRHui, LDURHi,
- am_indexed16, am_unscaled16, uimm12s2, hsub>;
-defm : LoadInsertZeroPatterns<load, v4i32, v2i32, nxv4i32, i32, LDRSui, LDURSi,
- am_indexed32, am_unscaled32, uimm12s4, ssub>;
-defm : LoadInsertZeroPatterns<load, v2i64, v1i64, nxv2i64, i64, LDRDui, LDURDi,
- am_indexed64, am_unscaled64, uimm12s8, dsub>;
-defm : LoadInsertZeroPatterns<load, v8f16, v4f16, nxv8f16, f16, LDRHui, LDURHi,
- am_indexed16, am_unscaled16, uimm12s2, hsub>;
-defm : LoadInsertZeroPatterns<load, v8bf16, v4bf16, nxv8bf16, bf16, LDRHui, LDURHi,
- am_indexed16, am_unscaled16, uimm12s2, hsub>;
-defm : LoadInsertZeroPatterns<load, v4f32, v2f32, nxv4f32, f32, LDRSui, LDURSi,
- am_indexed32, am_unscaled32, uimm12s4, ssub>;
-defm : LoadInsertZeroPatterns<load, v2f64, v1f64, nxv2f64, f64, LDRDui, LDURDi,
- am_indexed64, am_unscaled64, uimm12s8, dsub>;
+ def : Pat <(VT (vec_ins_or_scal_vec
+ (ScalarVT (LoadOp (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))))),
+ (SUBREG_TO_REG (i64 0), (ROWLoadInst GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend), SubReg)>;
+ def : Pat <(VT (vec_ins_or_scal_vec
+ (ScalarVT (LoadOp (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))))),
+ (SUBREG_TO_REG (i64 0), (ROXLoadInst GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend), SubReg)>;
+}
+
+multiclass LoadInsertPatterns<SDPatternOperator LoadOp, ValueType VT, ValueType HVT, ValueType SVT,
+ ValueType ScalarVT, Instruction LoadInst, Instruction UnscaledLoadInst,
+ Instruction ROWLoadInst, Instruction ROXLoadInst,
+ ROAddrMode ro, ComplexPattern Addr, ComplexPattern UnscaledAddr,
+ Operand AddrImm, SubRegIndex SubReg> {
+ defm : LoadInsertVTPatterns<LoadOp, VT, ScalarVT, LoadInst, UnscaledLoadInst, ROWLoadInst,
+ ROXLoadInst, ro, Addr, UnscaledAddr, AddrImm, SubReg>;
+ defm : LoadInsertVTPatterns<LoadOp, HVT, ScalarVT, LoadInst, UnscaledLoadInst, ROWLoadInst,
+ ROXLoadInst, ro, Addr, UnscaledAddr, AddrImm, SubReg>;
+ defm : LoadInsertVTPatterns<LoadOp, SVT, ScalarVT, LoadInst, UnscaledLoadInst, ROWLoadInst,
+ ROXLoadInst, ro, Addr, UnscaledAddr, AddrImm, SubReg>;
+}
+
+defm : LoadInsertPatterns<extloadi8, v16i8, v8i8, nxv16i8, i32,
+ LDRBui, LDURBi, LDRBroW, LDRBroX,
+ ro8, am_indexed8, am_unscaled8, uimm12s1, bsub>;
+defm : LoadInsertPatterns<extloadi16, v8i16, v4i16, nxv8i16, i32,
+ LDRHui, LDURHi, LDRHroW, LDRHroX,
+ ro16, am_indexed16, am_unscaled16, uimm12s2, hsub>;
+defm : LoadInsertPatterns<load, v4i32, v2i32, nxv4i32, i32,
+ LDRSui, LDURSi, LDRSroW, LDRSroX,
+ ro32, am_indexed32, am_unscaled32, uimm12s4, ssub>;
+defm : LoadInsertPatterns<load, v2i64, isVoid, nxv2i64, i64,
+ LDRDui, LDURDi, LDRDroW, LDRDroX,
+ ro64, am_indexed64, am_unscaled64, uimm12s8, dsub>;
+defm : LoadInsertPatterns<load, v8f16, v4f16, nxv8f16, f16,
+ LDRHui, LDURHi, LDRHroW, LDRHroX,
+ ro16, am_indexed16, am_unscaled16, uimm12s2, hsub>;
+defm : LoadInsertPatterns<load, v8bf16, v4bf16, nxv8bf16, bf16,
+ LDRHui, LDURHi, LDRHroW, LDRHroX,
+ ro16, am_indexed16, am_unscaled16, uimm12s2, hsub>;
+defm : LoadInsertPatterns<load, v4f32, v2f32, nxv4f32, f32,
+ LDRSui, LDURSi, LDRSroW, LDRSroX,
+ ro32, am_indexed32, am_unscaled32, uimm12s4, ssub>;
+defm : LoadInsertPatterns<load, v2f64, isVoid, nxv2f64, f64,
+ LDRDui, LDURDi, LDRDroW, LDRDroX,
+ ro64, am_indexed64, am_unscaled64, uimm12s8, dsub>;
+
+// Extra patterns for v1f64 scalar_to_vector(load), which need to avoid the
+// SUBREG_TO_REG used above.
+def : Pat <(v1i64 (scalar_to_vector (i64
+ (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
+ (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
+def : Pat <(v1i64 (scalar_to_vector (i64
+ (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))))),
+ (LDURDi GPR64sp:$Rn, simm9:$offset)>;
+def : Pat <(v1i64 (scalar_to_vector (i64
+ (load (ro64.Wpat GPR64sp:$Rn, GPR32:$Rm, ro64.Wext:$extend))))),
+ (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro64.Wext:$extend)>;
+def : Pat <(v1i64 (scalar_to_vector (i64
+ (load (ro64.Xpat GPR64sp:$Rn, GPR64:$Rm, ro64.Xext:$extend))))),
+ (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro64.Xext:$extend)>;
// Pre-fetch.
defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
diff --git a/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll b/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
index 5cfa59a3022394..dbbfbea9176f6e 100644
--- a/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
+++ b/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
@@ -78,9 +78,9 @@ entry:
define <16 x i8> @test5(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b) {
; CHECK-LABEL: test5:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0]
; CHECK-NEXT: adrp x8, .LCPI4_0
; CHECK-NEXT: ld1r { v1.16b }, [x1]
+; CHECK-NEXT: ldr b0, [x0]
; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI4_0]
; CHECK-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-uniform-cases.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-uniform-cases.ll
index e8d9ec7dc85de7..7686740aec3026 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-uniform-cases.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-uniform-cases.ll
@@ -203,89 +203,89 @@ define <12 x float> @abp90c12(<12 x float> %a, <12 x float> %b, <12 x float> %c)
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $s1 killed $s1 def $q1
; CHECK-NEXT: // kill: def $s3 killed $s3 def $q3
-; CHECK-NEXT: ldr s16, [sp, #40]
+; CHECK-NEXT: ldr s17, [sp, #40]
; CHECK-NEXT: add x10, sp, #56
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: add x9, sp, #48
; CHECK-NEXT: mov v1.s[1], v3.s[0]
; CHECK-NEXT: ldr s3, [sp, #32]
-; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT: // kill: def $s5 killed $s5 def $q5
; CHECK-NEXT: // kill: def $s2 killed $s2 def $q2
-; CHECK-NEXT: ldr s18, [sp, #8]
-; CHECK-NEXT: ld1 { v16.s }[1], [x10]
; CHECK-NEXT: mov v0.s[1], v2.s[0]
-; CHECK-NEXT: add x10, sp, #72
+; CHECK-NEXT: ld1 { v17.s }[1], [x10]
+; CHECK-NEXT: // kill: def $s5 killed $s5 def $q5
+; CHECK-NEXT: ldr s16, [sp, #8]
+; CHECK-NEXT: // kill: def $s4 killed $s4 def $q4
+; CHECK-NEXT: add x10, sp, #24
; CHECK-NEXT: ld1 { v3.s }[1], [x9]
-; CHECK-NEXT: add x9, sp, #64
-; CHECK-NEXT: ldr s17, [sp, #104]
+; CHECK-NEXT: add x9, sp, #72
; CHECK-NEXT: // kill: def $s7 killed $s7 def $q7
-; CHECK-NEXT: // kill: def $s4 killed $s4 def $q4
; CHECK-NEXT: // kill: def $s6 killed $s6 def $q6
-; CHECK-NEXT: ldr s2, [sp, #136]
-; CHECK-NEXT: ldr s20, [sp, #192]
+; CHECK-NEXT: ldr s2, [sp]
+; CHECK-NEXT: ld1 { v16.s }[1], [x10]
+; CHECK-NEXT: add x10, sp, #112
+; CHECK-NEXT: ldr s20, [sp, #136]
; CHECK-NEXT: mov v1.s[2], v5.s[0]
-; CHECK-NEXT: ld1 { v16.s }[2], [x10]
+; CHECK-NEXT: ld1 { v17.s }[2], [x9]
+; CHECK-NEXT: add x9, sp, #64
; CHECK-NEXT: ldr s5, [sp, #96]
; CHECK-NEXT: ld1 { v3.s }[2], [x9]
-; CHECK-NEXT: add x9, sp, #24
-; CHECK-NEXT: add x10, sp, #112
-; CHECK-NEXT: ld1 { v18.s }[1], [x9]
-; CHECK-NEXT: add x9, sp, #88
; CHECK-NEXT: mov v0.s[2], v4.s[0]
+; CHECK-NEXT: add x9, sp, #88
+; CHECK-NEXT: ldr s4, [sp, #104]
+; CHECK-NEXT: ldr s19, [sp, #192]
; CHECK-NEXT: ld1 { v5.s }[1], [x10]
; CHECK-NEXT: add x10, sp, #80
-; CHECK-NEXT: ld1 { v16.s }[3], [x9]
+; CHECK-NEXT: ld1 { v17.s }[3], [x9]
; CHECK-NEXT: mov v1.s[3], v7.s[0]
; CHECK-NEXT: add x9, sp, #120
-; CHECK-NEXT: ldr s4, [sp, #128]
; CHECK-NEXT: ld1 { v3.s }[3], [x10]
-; CHECK-NEXT: ld1 { v17.s }[1], [x9]
-; CHECK-NEXT: add x9, sp, #144
-; CHECK-NEXT: ldr s7, [sp]
; CHECK-NEXT: ld1 { v4.s }[1], [x9]
+; CHECK-NEXT: ldr s7, [sp, #128]
+; CHECK-NEXT: add x10, sp, #144
; CHECK-NEXT: mov v0.s[3], v6.s[0]
-; CHECK-NEXT: add x10, sp, #16
+; CHECK-NEXT: add x9, sp, #16
+; CHECK-NEXT: ld1 { v7.s }[1], [x10]
+; CHECK-NEXT: ld1 { v2.s }[1], [x9]
; CHECK-NEXT: add x9, sp, #160
-; CHECK-NEXT: fmul v6.4s, v16.4s, v1.4s
-; CHECK-NEXT: fmul v19.4s, v17.4s, v18.4s
-; CHECK-NEXT: fmul v18.4s, v5.4s, v18.4s
+; CHECK-NEXT: fmul v6.4s, v17.4s, v1.4s
+; CHECK-NEXT: fmul v18.4s, v4.4s, v16.4s
+; CHECK-NEXT: fmul v16.4s, v5.4s, v16.4s
; CHECK-NEXT: fmul v1.4s, v3.4s, v1.4s
-; CHECK-NEXT: ld1 { v7.s }[1], [x10]
-; CHECK-NEXT: ld1 { v4.s }[2], [x9]
-; CHECK-NEXT: add x9, sp, #152
; CHECK-NEXT: add x10, sp, #208
-; CHECK-NEXT: ld1 { v2.s }[1], [x9]
+; CHECK-NEXT: ld1 { v7.s }[2], [x9]
+; CHECK-NEXT: add x9, sp, #152
+; CHECK-NEXT: ld1 { v19.s }[1], [x10]
+; CHECK-NEXT: ld1 { v20.s }[1], [x9]
; CHECK-NEXT: add x9, sp, #176
-; CHECK-NEXT: ld1 { v20.s }[1], [x10]
+; CHECK-NEX...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/108185