[llvm] [AArch64] Make use of byte FPR stores for bytes extracted from vectors (PR #131793)

Benjamin Maxwell via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 28 04:39:25 PDT 2025


https://github.com/MacDue updated https://github.com/llvm/llvm-project/pull/131793

>From e5cb18b70954c7557b407e3ff874cada5fe864c9 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Tue, 4 Mar 2025 16:18:34 +0000
Subject: [PATCH] [AArch64] Make use of byte FPR stores for bytes extracted
 from vectors

This helps avoid some pointless `fmovs` in some cases. Currently, this
is done in ISEL as FPR bytes are problematic in SDAG (as neither a GPR
nor an FPR byte is a legal type).
---
 llvm/include/llvm/CodeGen/ValueTypes.td       |   2 +
 llvm/lib/CodeGen/ValueTypes.cpp               |   2 +
 .../Target/AArch64/AArch64ISelLowering.cpp    |   1 +
 llvm/lib/Target/AArch64/AArch64InstrInfo.td   |  11 +-
 .../lib/Target/AArch64/AArch64RegisterInfo.td |   2 +-
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td |  37 ++++
 .../CodeGen/AArch64/aarch64-sve-ldst-one.ll   | 159 ++++++++++++++----
 llvm/test/CodeGen/AArch64/add.ll              |   3 +-
 llvm/test/CodeGen/AArch64/andorxor.ll         |   9 +-
 .../test/CodeGen/AArch64/arm64-collect-loh.ll |   9 +-
 llvm/test/CodeGen/AArch64/arm64-st1.ll        |  36 ++--
 llvm/test/CodeGen/AArch64/bitcast-v2i8.ll     |   3 +-
 llvm/test/CodeGen/AArch64/ctlz.ll             |   3 +-
 llvm/test/CodeGen/AArch64/ctpop.ll            |   3 +-
 llvm/test/CodeGen/AArch64/cttz.ll             |   3 +-
 .../CodeGen/AArch64/extract-vector-cmp.ll     |   7 +-
 llvm/test/CodeGen/AArch64/mul.ll              |   3 +-
 llvm/test/CodeGen/AArch64/neon-truncstore.ll  |   6 +-
 llvm/test/CodeGen/AArch64/nontemporal-load.ll |   3 +-
 llvm/test/CodeGen/AArch64/pr-cf624b2.ll       |   6 +-
 llvm/test/CodeGen/AArch64/sadd_sat_vec.ll     |   5 +-
 .../CodeGen/AArch64/setcc-type-mismatch.ll    |   3 +-
 llvm/test/CodeGen/AArch64/ssub_sat_vec.ll     |   5 +-
 llvm/test/CodeGen/AArch64/store.ll            |   3 +-
 llvm/test/CodeGen/AArch64/sub.ll              |   3 +-
 ...-streaming-mode-fixed-length-ld2-alloca.ll |   9 +-
 ...mode-fixed-length-masked-gather-scatter.ll |  12 +-
 llvm/test/CodeGen/AArch64/uadd_sat_vec.ll     |   5 +-
 llvm/test/CodeGen/AArch64/usub_sat_vec.ll     |   5 +-
 .../vec-combine-compare-truncate-store.ll     |  11 +-
 .../AArch64/vec3-loads-ext-trunc-stores.ll    |  26 ++-
 llvm/test/CodeGen/AArch64/vector-compress.ll  |   2 +-
 32 files changed, 261 insertions(+), 136 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/ValueTypes.td b/llvm/include/llvm/CodeGen/ValueTypes.td
index fc1a95e33380b..42c4830e94220 100644
--- a/llvm/include/llvm/CodeGen/ValueTypes.td
+++ b/llvm/include/llvm/CodeGen/ValueTypes.td
@@ -338,6 +338,8 @@ def amdgpuBufferFatPointer : ValueType<160, 234>;
 // FIXME: Remove this and the getPointerType() override if MVT::i82 is added.
 def amdgpuBufferStridedPointer : ValueType<192, 235>;
 
+def vi8       : ValueType<8,  236>;  // 8-bit integer in FPR (AArch64)
+
 let isNormalValueType = false in {
 def token      : ValueType<0, 504>;  // TokenTy
 def MetadataVT : ValueType<0, 505> { // Metadata
diff --git a/llvm/lib/CodeGen/ValueTypes.cpp b/llvm/lib/CodeGen/ValueTypes.cpp
index 0554b6387c5e6..c769568253b12 100644
--- a/llvm/lib/CodeGen/ValueTypes.cpp
+++ b/llvm/lib/CodeGen/ValueTypes.cpp
@@ -198,6 +198,8 @@ std::string EVT::getEVTString() const {
     return "amdgpuBufferFatPointer";
   case MVT::amdgpuBufferStridedPointer:
     return "amdgpuBufferStridedPointer";
+  case MVT::vi8:
+    return "vi8";
   }
 }
 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1c8e3afdfd718..5fec669da9c33 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -401,6 +401,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
   }
 
   if (Subtarget->hasFPARMv8()) {
+    addRegisterClass(MVT::vi8, &AArch64::FPR8RegClass);
     addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
     addRegisterClass(MVT::bf16, &AArch64::FPR16RegClass);
     addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 6c61e3a613f6f..1c1ff656db910 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -3575,7 +3575,7 @@ defm LDRW : LoadUI<0b10, 0, 0b01, GPR32z, uimm12s4, "ldr",
                          (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
 let Predicates = [HasFPARMv8] in {
 defm LDRB : LoadUI<0b00, 1, 0b01, FPR8Op, uimm12s1, "ldr",
-                   [(set FPR8Op:$Rt,
+                   [(set (i8 FPR8Op:$Rt),
                          (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
 defm LDRH : LoadUI<0b01, 1, 0b01, FPR16Op, uimm12s2, "ldr",
                    [(set (f16 FPR16Op:$Rt),
@@ -3763,7 +3763,7 @@ defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32z, "ldur",
                           (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
 let Predicates = [HasFPARMv8] in {
 defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8Op, "ldur",
-                    [(set FPR8Op:$Rt,
+                    [(set (i8 FPR8Op:$Rt),
                           (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
 defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16Op, "ldur",
                     [(set (f16 FPR16Op:$Rt),
@@ -4333,7 +4333,7 @@ defm STRW : StoreUIz<0b10, 0, 0b00, GPR32z, uimm12s4, "str",
                             (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
 let Predicates = [HasFPARMv8] in {
 defm STRB : StoreUI<0b00, 1, 0b00, FPR8Op, uimm12s1, "str",
-                    [(store FPR8Op:$Rt,
+                    [(store (i8 FPR8Op:$Rt),
                             (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
 defm STRH : StoreUI<0b01, 1, 0b00, FPR16Op, uimm12s2, "str",
                     [(store (f16 FPR16Op:$Rt),
@@ -4451,6 +4451,8 @@ multiclass VecStoreLane0Pat<ComplexPattern UIAddrMode, SDPatternOperator storeop
 }
 
 let AddedComplexity = 19 in {
+  defm : VecStoreLane0Pat<am_indexed8,   truncstorei8, v16i8, i32, vi8, bsub, uimm12s2, STRBui>;
+  defm : VecStoreLane0Pat<am_indexed8,   truncstorei8, v4i32, i32, vi8, bsub, uimm12s2, STRBui>;
   defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, f16, hsub, uimm12s2, STRHui>;
   defm : VecStoreLane0Pat<am_indexed16,         store, v8f16, f16, f16, hsub, uimm12s2, STRHui>;
   defm : VecStoreLane0Pat<am_indexed32,         store, v4i32, i32, i32, ssub, uimm12s4, STRSui>;
@@ -4469,7 +4471,7 @@ defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32z, "stur",
                                  (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
 let Predicates = [HasFPARMv8] in {
 defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8Op, "stur",
-                         [(store FPR8Op:$Rt,
+                         [(store (i8 FPR8Op:$Rt),
                                  (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
 defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16Op, "stur",
                          [(store (f16 FPR16Op:$Rt),
@@ -4598,6 +4600,7 @@ multiclass VecStoreULane0Pat<SDPatternOperator StoreOp,
 }
 
 let AddedComplexity = 19 in {
+  defm : VecStoreULane0Pat<truncstorei8,  v16i8, i32, vi8, bsub, STURBi>;
   defm : VecStoreULane0Pat<truncstorei16, v8i16, i32, f16, hsub, STURHi>;
   defm : VecStoreULane0Pat<store,         v8f16, f16, f16, hsub, STURHi>;
   defm : VecStoreULane0Pat<store,         v4i32, i32, i32, ssub, STURSi>;
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
index fed9b7b173e9c..42ba1451650ed 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
@@ -497,7 +497,7 @@ def Q30   : AArch64Reg<30, "q30", [D30, D30_HI], ["v30", ""]>, DwarfRegAlias<B30
 def Q31   : AArch64Reg<31, "q31", [D31, D31_HI], ["v31", ""]>, DwarfRegAlias<B31>;
 }
 
-def FPR8  : RegisterClass<"AArch64", [i8], 8, (sequence "B%u", 0, 31)> {
+def FPR8  : RegisterClass<"AArch64", [i8, vi8], 8, (sequence "B%u", 0, 31)> {
   let Size = 8;
   let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::FPR8RegClassID, 0, 32>";
 }
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 3ee71c14c6bd4..1884a90828acb 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1827,6 +1827,43 @@ let Predicates = [HasSVE] in {
   defm : adrXtwShiftPat<nxv2i64, nxv2i1, 3>;
 } // End HasSVE
 
+multiclass SVEVecStoreLanePat<ComplexPattern UIAddrMode, SDPatternOperator storeop,
+                              ValueType VTy, ValueType STy,
+                              ValueType SubRegTy,
+                              SubRegIndex SubRegIdx, Operand IndexType,
+                              Instruction STR,
+                              Instruction DUP, AsmVectorIndexOpnd DUPIdxTy> {
+  let Predicates = [HasSVE_or_SME] in {
+    // Same as Neon VecStoreLane0Pat but without matching VecListOne128.
+    def : Pat<(storeop (STy (vector_extract VTy:$Vt, (i64 0))),
+                       (UIAddrMode GPR64sp:$Rn, IndexType:$offset)),
+              (STR (SubRegTy (EXTRACT_SUBREG $Vt, SubRegIdx)),
+                    GPR64sp:$Rn, IndexType:$offset)>;
+  }
+
+  // Non-zero immediate index:
+  def : Pat<(storeop (STy (vector_extract VTy:$Vt, DUPIdxTy:$idx)),
+                     (UIAddrMode GPR64sp:$Rn, IndexType:$offset)),
+            (STR (SubRegTy (EXTRACT_SUBREG (DUP $Vt, DUPIdxTy:$idx), SubRegIdx)),
+                  GPR64sp:$Rn, IndexType:$offset)>;
+}
+
+// Note: Types other than i8 are handled in performSTORECombine -- i8 is tricky
+// to handle before ISEL as it is not really a legal type in many places, nor
+// is its equivalently sized FP variant.
+let AddedComplexity = 19 in {
+  // Lane 0 truncating stores
+  // i32 -> i8
+  defm : SVEVecStoreLanePat<am_indexed8,  truncstorei8, nxv4i32, i32, vi8, bsub, uimm12s4, STRBui, DUP_ZZI_S, sve_elm_idx_extdup_s>;
+  defm : SVEVecStoreLanePat<am_unscaled8, truncstorei8, nxv4i32, i32, vi8, bsub, simm9, STURBi, DUP_ZZI_S, sve_elm_idx_extdup_s>;
+  // i64 -> i8
+  defm : SVEVecStoreLanePat<am_indexed8,  truncstorei8, nxv2i64, i64, vi8, bsub, uimm12s4, STRBui, DUP_ZZI_D, sve_elm_idx_extdup_d>;
+  defm : SVEVecStoreLanePat<am_unscaled8, truncstorei8, nxv2i64, i64, vi8, bsub, simm9, STURBi, DUP_ZZI_D, sve_elm_idx_extdup_d>;
+  // i8 -> i8 (technically a truncate as the extracted type is i32)
+  defm : SVEVecStoreLanePat<am_indexed8,  truncstorei8, nxv16i8, i32, vi8, bsub, uimm12s4, STRBui, DUP_ZZI_B, sve_elm_idx_extdup_b>;
+  defm : SVEVecStoreLanePat<am_unscaled8, truncstorei8, nxv16i8, i32, vi8, bsub, simm9, STURBi, DUP_ZZI_B, sve_elm_idx_extdup_b>;
+}
+
 let Predicates = [HasSVE_or_SME] in {
   defm TBL_ZZZ  : sve_int_perm_tbl<"tbl", AArch64tbl>;
 
diff --git a/llvm/test/CodeGen/AArch64/aarch64-sve-ldst-one.ll b/llvm/test/CodeGen/AArch64/aarch64-sve-ldst-one.ll
index d39c9bf760621..b91cb872a9e0a 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-sve-ldst-one.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-sve-ldst-one.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -verify-machineinstrs -mattr=+sve -global-isel=0 | FileCheck %s --check-prefixes=CHECK,CHECK-NONSTREAMING
-; RUN: llc < %s -verify-machineinstrs -mattr=+sme -global-isel=0 -force-streaming | FileCheck %s --check-prefixes=CHECK,STREAMING-COMPAT
-; RUN: llc < %s -verify-machineinstrs -mattr=+sve -global-isel=0 -force-streaming-compatible | FileCheck %s --check-prefixes=CHECK,STREAMING-COMPAT
+; RUN: llc < %s -verify-machineinstrs -mattr=+sve -global-isel=0 | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mattr=+sme -global-isel=0 -force-streaming | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mattr=+sve -global-isel=0 -force-streaming-compatible | FileCheck %s
 
 target triple = "aarch64-unknown-linux-gnu"
 
@@ -106,18 +106,11 @@ entry:
 }
 
 define void @test_str_lane_s8(ptr %a, <vscale x 16 x i8> %b) {
-; CHECK-NONSTREAMING-LABEL: test_str_lane_s8:
-; CHECK-NONSTREAMING:       // %bb.0: // %entry
-; CHECK-NONSTREAMING-NEXT:    umov w8, v0.b[7]
-; CHECK-NONSTREAMING-NEXT:    strb w8, [x0]
-; CHECK-NONSTREAMING-NEXT:    ret
-;
-; STREAMING-COMPAT-LABEL: test_str_lane_s8:
-; STREAMING-COMPAT:       // %bb.0: // %entry
-; STREAMING-COMPAT-NEXT:    mov z0.b, z0.b[7]
-; STREAMING-COMPAT-NEXT:    fmov w8, s0
-; STREAMING-COMPAT-NEXT:    strb w8, [x0]
-; STREAMING-COMPAT-NEXT:    ret
+; CHECK-LABEL: test_str_lane_s8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov z0.b, z0.b[7]
+; CHECK-NEXT:    str b0, [x0]
+; CHECK-NEXT:    ret
 
 entry:
   %0 = extractelement <vscale x 16 x i8> %b, i32 7
@@ -128,8 +121,7 @@ entry:
 define void @test_str_lane0_s8(ptr %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: test_str_lane0_s8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strb w8, [x0]
+; CHECK-NEXT:    str b0, [x0]
 ; CHECK-NEXT:    ret
 
 entry:
@@ -201,6 +193,19 @@ define void @test_str_reduction_i32_to_i16(ptr %ptr, <vscale x 4 x i1> %p0, <vsc
   ret void
 }
 
+define void @test_str_reduction_i32_to_i8(ptr %ptr, <vscale x 4 x i1> %p0, <vscale x 4 x i32> %v) {
+; CHECK-LABEL: test_str_reduction_i32_to_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uaddv d0, p0, z0.s
+; CHECK-NEXT:    str b0, [x0]
+; CHECK-NEXT:    ret
+
+  %reduce = tail call i64 @llvm.aarch64.sve.uaddv.nxv4i32(<vscale x 4 x i1> %p0, <vscale x 4 x i32> %v)
+  %trunc = trunc i64 %reduce to i8
+  store i8 %trunc, ptr %ptr, align 1
+  ret void
+}
+
 define void @test_str_reduction_i32_to_i32_negative_offset(ptr %ptr, <vscale x 4 x i1> %p0, <vscale x 4 x i32> %v) {
 ; CHECK-LABEL: test_str_reduction_i32_to_i32_negative_offset:
 ; CHECK:       // %bb.0:
@@ -242,6 +247,20 @@ define void @test_str_reduction_i32_to_i16_negative_offset(ptr %ptr, <vscale x 4
   ret void
 }
 
+define void @test_str_reduction_i32_to_i8_negative_offset(ptr %ptr, <vscale x 4 x i1> %p0, <vscale x 4 x i32> %v) {
+; CHECK-LABEL: test_str_reduction_i32_to_i8_negative_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uaddv d0, p0, z0.s
+; CHECK-NEXT:    stur b0, [x0, #-8]
+; CHECK-NEXT:    ret
+
+  %reduce = tail call i64 @llvm.aarch64.sve.uaddv.nxv4i32(<vscale x 4 x i1> %p0, <vscale x 4 x i32> %v)
+  %trunc = trunc i64 %reduce to i8
+  %out_ptr = getelementptr inbounds i8, ptr %ptr, i64 -8
+  store i8 %trunc, ptr %out_ptr, align 1
+  ret void
+}
+
 define void @test_str_lane_s32_negative_offset(ptr %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: test_str_lane_s32_negative_offset:
 ; CHECK:       // %bb.0: // %entry
@@ -297,18 +316,11 @@ entry:
 }
 
 define void @test_str_lane_s8_negative_offset(ptr %a, <vscale x 16 x i8> %b) {
-; CHECK-NONSTREAMING-LABEL: test_str_lane_s8_negative_offset:
-; CHECK-NONSTREAMING:       // %bb.0: // %entry
-; CHECK-NONSTREAMING-NEXT:    umov w8, v0.b[7]
-; CHECK-NONSTREAMING-NEXT:    sturb w8, [x0, #-8]
-; CHECK-NONSTREAMING-NEXT:    ret
-;
-; STREAMING-COMPAT-LABEL: test_str_lane_s8_negative_offset:
-; STREAMING-COMPAT:       // %bb.0: // %entry
-; STREAMING-COMPAT-NEXT:    mov z0.b, z0.b[7]
-; STREAMING-COMPAT-NEXT:    fmov w8, s0
-; STREAMING-COMPAT-NEXT:    sturb w8, [x0, #-8]
-; STREAMING-COMPAT-NEXT:    ret
+; CHECK-LABEL: test_str_lane_s8_negative_offset:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov z0.b, z0.b[7]
+; CHECK-NEXT:    stur b0, [x0, #-8]
+; CHECK-NEXT:    ret
 
 entry:
   %0 = extractelement <vscale x 16 x i8> %b, i32 7
@@ -320,8 +332,7 @@ entry:
 define void @test_str_lane0_s8_negative_offset(ptr %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: test_str_lane0_s8_negative_offset:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    sturb w8, [x0, #-8]
+; CHECK-NEXT:    stur b0, [x0, #-8]
 ; CHECK-NEXT:    ret
 
 entry:
@@ -385,6 +396,48 @@ entry:
   ret void
 }
 
+
+define void @test_str_trunc_lane_s32_to_s8(ptr %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: test_str_trunc_lane_s32_to_s8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov z0.s, z0.s[3]
+; CHECK-NEXT:    str b0, [x0]
+; CHECK-NEXT:    ret
+
+entry:
+  %0 = extractelement <vscale x 4 x i32> %b, i32 3
+  %trunc = trunc i32 %0 to i8
+  store i8 %trunc, ptr %a, align 1
+  ret void
+}
+
+define void @test_str_trunc_lane0_s32_to_s8(ptr %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: test_str_trunc_lane0_s32_to_s8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str b0, [x0]
+; CHECK-NEXT:    ret
+
+entry:
+  %0 = extractelement <vscale x 4 x i32> %b, i32 0
+  %trunc = trunc i32 %0 to i8
+  store i8 %trunc, ptr %a, align 1
+  ret void
+}
+
+define void @test_str_trunc_lane_s64_to_s8(ptr %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: test_str_trunc_lane_s64_to_s8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov z0.d, z0.d[3]
+; CHECK-NEXT:    str b0, [x0]
+; CHECK-NEXT:    ret
+
+entry:
+  %0 = extractelement <vscale x 2 x i64> %b, i32 3
+  %trunc = trunc i64 %0 to i8
+  store i8 %trunc, ptr %a, align 1
+  ret void
+}
+
 define void @test_str_trunc_lane_s32_to_s16_negative_offset(ptr %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: test_str_trunc_lane_s32_to_s16_negative_offset:
 ; CHECK:       // %bb.0: // %entry
@@ -413,3 +466,47 @@ entry:
   store i16 %trunc, ptr %out_ptr, align 2
   ret void
 }
+
+define void @test_str_trunc_lane_s32_to_s8_negative_offset(ptr %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: test_str_trunc_lane_s32_to_s8_negative_offset:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov z0.s, z0.s[3]
+; CHECK-NEXT:    stur b0, [x0, #-8]
+; CHECK-NEXT:    ret
+
+entry:
+  %0 = extractelement <vscale x 4 x i32> %b, i32 3
+  %trunc = trunc i32 %0 to i8
+  %out_ptr = getelementptr inbounds i8, ptr %a, i64 -8
+  store i8 %trunc, ptr %out_ptr, align 1
+  ret void
+}
+
+define void @test_str_trunc_lane0_s32_to_s8_negative_offset(ptr %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: test_str_trunc_lane0_s32_to_s8_negative_offset:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    stur b0, [x0, #-8]
+; CHECK-NEXT:    ret
+
+entry:
+  %0 = extractelement <vscale x 4 x i32> %b, i32 0
+  %trunc = trunc i32 %0 to i8
+  %out_ptr = getelementptr inbounds i8, ptr %a, i64 -8
+  store i8 %trunc, ptr %out_ptr, align 1
+  ret void
+}
+
+define void @test_str_trunc_lane_s64_to_s8_negative_offset(ptr %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: test_str_trunc_lane_s64_to_s8_negative_offset:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov z0.d, z0.d[3]
+; CHECK-NEXT:    stur b0, [x0, #-8]
+; CHECK-NEXT:    ret
+
+entry:
+  %0 = extractelement <vscale x 2 x i64> %b, i32 3
+  %trunc = trunc i64 %0 to i8
+  %out_ptr = getelementptr inbounds i8, ptr %a, i64 -8
+  store i8 %trunc, ptr %out_ptr, align 1
+  ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/add.ll b/llvm/test/CodeGen/AArch64/add.ll
index fc0ba336b21cc..cdde359d09d7b 100644
--- a/llvm/test/CodeGen/AArch64/add.ll
+++ b/llvm/test/CodeGen/AArch64/add.ll
@@ -64,8 +64,7 @@ define void @v2i8(ptr %p1, ptr %p2) {
 ; CHECK-SD-NEXT:    ld1 { v1.b }[4], [x9]
 ; CHECK-SD-NEXT:    add v0.2s, v0.2s, v1.2s
 ; CHECK-SD-NEXT:    mov w8, v0.s[1]
-; CHECK-SD-NEXT:    fmov w9, s0
-; CHECK-SD-NEXT:    strb w9, [x0]
+; CHECK-SD-NEXT:    str b0, [x0]
 ; CHECK-SD-NEXT:    strb w8, [x0, #1]
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/andorxor.ll b/llvm/test/CodeGen/AArch64/andorxor.ll
index 24f2549cce785..03c7bad9efc22 100644
--- a/llvm/test/CodeGen/AArch64/andorxor.ll
+++ b/llvm/test/CodeGen/AArch64/andorxor.ll
@@ -184,8 +184,7 @@ define void @and_v2i8(ptr %p1, ptr %p2) {
 ; CHECK-SD-NEXT:    ld1 { v1.b }[4], [x9]
 ; CHECK-SD-NEXT:    and v0.8b, v0.8b, v1.8b
 ; CHECK-SD-NEXT:    mov w8, v0.s[1]
-; CHECK-SD-NEXT:    fmov w9, s0
-; CHECK-SD-NEXT:    strb w9, [x0]
+; CHECK-SD-NEXT:    str b0, [x0]
 ; CHECK-SD-NEXT:    strb w8, [x0, #1]
 ; CHECK-SD-NEXT:    ret
 ;
@@ -221,8 +220,7 @@ define void @or_v2i8(ptr %p1, ptr %p2) {
 ; CHECK-SD-NEXT:    ld1 { v1.b }[4], [x9]
 ; CHECK-SD-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-SD-NEXT:    mov w8, v0.s[1]
-; CHECK-SD-NEXT:    fmov w9, s0
-; CHECK-SD-NEXT:    strb w9, [x0]
+; CHECK-SD-NEXT:    str b0, [x0]
 ; CHECK-SD-NEXT:    strb w8, [x0, #1]
 ; CHECK-SD-NEXT:    ret
 ;
@@ -258,8 +256,7 @@ define void @xor_v2i8(ptr %p1, ptr %p2) {
 ; CHECK-SD-NEXT:    ld1 { v1.b }[4], [x9]
 ; CHECK-SD-NEXT:    eor v0.8b, v0.8b, v1.8b
 ; CHECK-SD-NEXT:    mov w8, v0.s[1]
-; CHECK-SD-NEXT:    fmov w9, s0
-; CHECK-SD-NEXT:    strb w9, [x0]
+; CHECK-SD-NEXT:    str b0, [x0]
 ; CHECK-SD-NEXT:    strb w8, [x0, #1]
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll b/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll
index 2c065e0051cd7..7f2bebf584d8f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll
@@ -615,11 +615,10 @@ define <1 x i8> @getL() {
 ; CHECK-NEXT: ; kill
 ; CHECK-NEXT: [[LDRGOT_LABEL:Lloh[0-9]+]]:
 ; CHECK-NEXT: ldr {{[xw]}}[[LDRGOT_REG:[0-9]+]], [[[ADRP_REG]], _L at GOTPAGEOFF]
-; Ultimately we should generate str b0, but right now, we match the vector
-; variant which does not allow to fold the immediate into the store.
-; CHECK-NEXT: st1.b { v0 }[0], [x[[LDRGOT_REG]]]
+; CHECK-NEXT: [[STR_LABEL:Lloh[0-9]+]]:
+; CHECK-NEXT: str b0, [x[[LDRGOT_REG]]]
 ; CHECK-NEXT: ret
-; CHECK: .loh AdrpLdrGot [[ADRP_LABEL]], [[LDRGOT_LABEL]]
+; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setL(<1 x i8> %t) {
   store <1 x i8> %t, ptr @L, align 4
   ret void
@@ -678,6 +677,6 @@ if.end.i:
   call void (ptr, ...) @callee(ptr @.str.89, ptr @.str.90, double %sub)
   unreachable
 }
-declare void @callee(ptr nocapture readonly, ...) 
+declare void @callee(ptr nocapture readonly, ...)
 
 attributes #0 = { "target-cpu"="cyclone" }
diff --git a/llvm/test/CodeGen/AArch64/arm64-st1.ll b/llvm/test/CodeGen/AArch64/arm64-st1.ll
index 6f87c66c87345..c63d66c4e7706 100644
--- a/llvm/test/CodeGen/AArch64/arm64-st1.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-st1.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s
-; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,SD-CHECK
+; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,GI-CHECK
 ; The instruction latencies of Exynos-M3 trigger the transform we see under the Exynos check.
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs -mcpu=exynos-m3 | FileCheck --check-prefix=EXYNOS %s
 
@@ -13,8 +13,11 @@ define void @st1lane_16b(<16 x i8> %A, ptr %D) {
 }
 
 define void @st1lane0_16b(<16 x i8> %A, ptr %D) {
-; CHECK-LABEL: st1lane0_16b
-; CHECK: st1.b { v0 }[0], [x{{[0-9]+}}]
+; SD-CHECK-LABEL: st1lane0_16b
+; SD-CHECK: str b0, [x{{[0-9]+}}, #1]
+
+; GI-CHECK-LABEL: st1lane0_16b
+; GI-CHECK: st1.b { v0 }[0], [x{{[0-9]+}}]
   %ptr = getelementptr i8, ptr %D, i64 1
   %tmp = extractelement <16 x i8> %A, i32 0
   store i8 %tmp, ptr %ptr
@@ -22,8 +25,11 @@ define void @st1lane0_16b(<16 x i8> %A, ptr %D) {
 }
 
 define void @st1lane0u_16b(<16 x i8> %A, ptr %D) {
-; CHECK-LABEL: st1lane0u_16b
-; CHECK: st1.b { v0 }[0], [x{{[0-9]+}}]
+; SD-CHECK-LABEL: st1lane0u_16b
+; SD-CHECK: stur b0, [x{{[0-9]+}}, #-1]
+
+; GI-CHECK-LABEL: st1lane0u_16b
+; GI-CHECK: st1.b { v0 }[0], [x{{[0-9]+}}]
   %ptr = getelementptr i8, ptr %D, i64 -1
   %tmp = extractelement <16 x i8> %A, i32 0
   store i8 %tmp, ptr %ptr
@@ -41,9 +47,12 @@ define void @st1lane_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) {
 }
 
 define void @st1lane0_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) {
-; CHECK-LABEL: st1lane0_ro_16b
-; CHECK: add x[[XREG:[0-9]+]], x0, x1
-; CHECK: st1.b { v0 }[0], [x[[XREG]]]
+; SD-CHECK-LABEL: st1lane0_ro_16b
+; SD-CHECK: str b0, [x0, x1]
+
+; GI-CHECK-LABEL: st1lane0_ro_16b
+; GI-CHECK: add x[[XREG:[0-9]+]], x0, x1
+; GI-CHECK: st1.b { v0 }[0], [x[[XREG]]]
   %ptr = getelementptr i8, ptr %D, i64 %offset
   %tmp = extractelement <16 x i8> %A, i32 0
   store i8 %tmp, ptr %ptr
@@ -300,9 +309,12 @@ define void @st1lane_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) {
 }
 
 define void @st1lane0_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) {
-; CHECK-LABEL: st1lane0_ro_8b
-; CHECK: add x[[XREG:[0-9]+]], x0, x1
-; CHECK: st1.b { v0 }[0], [x[[XREG]]]
+; SD-CHECK-LABEL: st1lane0_ro_8b
+; SD-CHECK: str b0, [x0, x1]
+
+; GI-CHECK-LABEL: st1lane0_ro_8b
+; GI-CHECK: add x[[XREG:[0-9]+]], x0, x1
+; GI-CHECK: st1.b { v0 }[0], [x[[XREG]]]
   %ptr = getelementptr i8, ptr %D, i64 %offset
   %tmp = extractelement <8 x i8> %A, i32 0
   store i8 %tmp, ptr %ptr
diff --git a/llvm/test/CodeGen/AArch64/bitcast-v2i8.ll b/llvm/test/CodeGen/AArch64/bitcast-v2i8.ll
index aff3ffc70a711..77304aef4385e 100644
--- a/llvm/test/CodeGen/AArch64/bitcast-v2i8.ll
+++ b/llvm/test/CodeGen/AArch64/bitcast-v2i8.ll
@@ -5,9 +5,8 @@
 define i16 @test_bitcast_v2i8_to_i16(<2 x i8> %a) {
 ; CHECK-LABEL: test_bitcast_v2i8_to_i16
 ; CHECK:      mov.s   [[WREG_HI:w[0-9]+]], v0[1]
-; CHECK-NEXT: fmov    [[WREG_LO:w[0-9]+]], s0
 ; CHECK-NEXT: strb    [[WREG_HI]], [sp, #15]
-; CHECK-NEXT: strb    [[WREG_LO]], [sp, #14]
+; CHECK-NEXT: str     [[WREG_LO:b[0-9]+]], [sp, #14]
 ; CHECK-NEXT: ldrh    w0, [sp, #14]
 
   %aa = bitcast <2 x i8> %a to i16
diff --git a/llvm/test/CodeGen/AArch64/ctlz.ll b/llvm/test/CodeGen/AArch64/ctlz.ll
index 742433c50d390..79676efebe776 100644
--- a/llvm/test/CodeGen/AArch64/ctlz.ll
+++ b/llvm/test/CodeGen/AArch64/ctlz.ll
@@ -14,8 +14,7 @@ define void @v2i8(ptr %p1) {
 ; CHECK-SD-NEXT:    clz v1.2s, v1.2s
 ; CHECK-SD-NEXT:    sub v0.2s, v1.2s, v0.2s
 ; CHECK-SD-NEXT:    mov w8, v0.s[1]
-; CHECK-SD-NEXT:    fmov w9, s0
-; CHECK-SD-NEXT:    strb w9, [x0]
+; CHECK-SD-NEXT:    str b0, [x0]
 ; CHECK-SD-NEXT:    strb w8, [x0, #1]
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/ctpop.ll b/llvm/test/CodeGen/AArch64/ctpop.ll
index c7c378d3e67cd..767b9d28d6215 100644
--- a/llvm/test/CodeGen/AArch64/ctpop.ll
+++ b/llvm/test/CodeGen/AArch64/ctpop.ll
@@ -14,8 +14,7 @@ define void @v2i8(ptr %p1) {
 ; CHECK-SD-NEXT:    uaddlp v0.4h, v0.8b
 ; CHECK-SD-NEXT:    uaddlp v0.2s, v0.4h
 ; CHECK-SD-NEXT:    mov w8, v0.s[1]
-; CHECK-SD-NEXT:    fmov w9, s0
-; CHECK-SD-NEXT:    strb w9, [x0]
+; CHECK-SD-NEXT:    str b0, [x0]
 ; CHECK-SD-NEXT:    strb w8, [x0, #1]
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/cttz.ll b/llvm/test/CodeGen/AArch64/cttz.ll
index 41843e03cb81e..97f5a29064c67 100644
--- a/llvm/test/CodeGen/AArch64/cttz.ll
+++ b/llvm/test/CodeGen/AArch64/cttz.ll
@@ -17,8 +17,7 @@ define void @v2i8(ptr %p1) {
 ; CHECK-SD-NEXT:    clz v0.2s, v0.2s
 ; CHECK-SD-NEXT:    sub v0.2s, v1.2s, v0.2s
 ; CHECK-SD-NEXT:    mov w8, v0.s[1]
-; CHECK-SD-NEXT:    fmov w9, s0
-; CHECK-SD-NEXT:    strb w9, [x0]
+; CHECK-SD-NEXT:    str b0, [x0]
 ; CHECK-SD-NEXT:    strb w8, [x0, #1]
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/extract-vector-cmp.ll b/llvm/test/CodeGen/AArch64/extract-vector-cmp.ll
index 8345fdfa46b4c..f076ee12427d8 100644
--- a/llvm/test/CodeGen/AArch64/extract-vector-cmp.ll
+++ b/llvm/test/CodeGen/AArch64/extract-vector-cmp.ll
@@ -184,17 +184,16 @@ define i1 @extract_icmp_v4i32_splat_rhs_mul_use(<4 x i32> %a, ptr %p) {
 ; CHECK-LABEL: extract_icmp_v4i32_splat_rhs_mul_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v1.4s, #235
-; CHECK-NEXT:    adrp x9, .LCPI8_0
+; CHECK-NEXT:    adrp x8, .LCPI8_0
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI8_0]
 ; CHECK-NEXT:    mov x8, x0
-; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI8_0]
 ; CHECK-NEXT:    cmhi v0.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    xtn v1.4h, v0.4s
 ; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    addv s0, v0.4s
 ; CHECK-NEXT:    umov w9, v1.h[1]
-; CHECK-NEXT:    fmov w10, s0
+; CHECK-NEXT:    str b0, [x8]
 ; CHECK-NEXT:    and w0, w9, #0x1
-; CHECK-NEXT:    strb w10, [x8]
 ; CHECK-NEXT:    ret
   %icmp = icmp ult <4 x i32> %a, splat(i32 235)
   %ext = extractelement <4 x i1> %icmp, i32 1
diff --git a/llvm/test/CodeGen/AArch64/mul.ll b/llvm/test/CodeGen/AArch64/mul.ll
index 500379d1cfdec..0d7a6a7dbcb11 100644
--- a/llvm/test/CodeGen/AArch64/mul.ll
+++ b/llvm/test/CodeGen/AArch64/mul.ll
@@ -76,8 +76,7 @@ define void @v2i8(ptr %p1, ptr %p2) {
 ; CHECK-SD-NEXT:    ld1 { v1.b }[4], [x9]
 ; CHECK-SD-NEXT:    mul v0.2s, v0.2s, v1.2s
 ; CHECK-SD-NEXT:    mov w8, v0.s[1]
-; CHECK-SD-NEXT:    fmov w9, s0
-; CHECK-SD-NEXT:    strb w9, [x0]
+; CHECK-SD-NEXT:    str b0, [x0]
 ; CHECK-SD-NEXT:    strb w8, [x0, #1]
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/neon-truncstore.ll b/llvm/test/CodeGen/AArch64/neon-truncstore.ll
index 3d3362d314a99..a070e3d7565ed 100644
--- a/llvm/test/CodeGen/AArch64/neon-truncstore.ll
+++ b/llvm/test/CodeGen/AArch64/neon-truncstore.ll
@@ -90,8 +90,7 @@ define void @v2i32_v2i8(<2 x i32> %a, ptr %result) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    mov w8, v0.s[1]
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    strb w9, [x0]
+; CHECK-NEXT:    str b0, [x0]
 ; CHECK-NEXT:    strb w8, [x0, #1]
 ; CHECK-NEXT:    ret
   %b = trunc <2 x i32> %a to <2 x i8>
@@ -157,8 +156,7 @@ define void @v2i16_v2i8(<2 x i16> %a, ptr %result) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    mov w8, v0.s[1]
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    strb w9, [x0]
+; CHECK-NEXT:    str b0, [x0]
 ; CHECK-NEXT:    strb w8, [x0, #1]
 ; CHECK-NEXT:    ret
   %b = trunc <2 x i16> %a to <2 x i8>
diff --git a/llvm/test/CodeGen/AArch64/nontemporal-load.ll b/llvm/test/CodeGen/AArch64/nontemporal-load.ll
index 959ac7f68e351..28cff55beff9e 100644
--- a/llvm/test/CodeGen/AArch64/nontemporal-load.ll
+++ b/llvm/test/CodeGen/AArch64/nontemporal-load.ll
@@ -449,10 +449,9 @@ define <33 x i8> @test_ldnp_v33i8(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v33i8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
-; CHECK-NEXT:    add x9, x8, #32
 ; CHECK-NEXT:    ldr b2, [x0, #32]
 ; CHECK-NEXT:    stp q0, q1, [x8]
-; CHECK-NEXT:    st1.b { v2 }[0], [x9]
+; CHECK-NEXT:    str b2, [x8, #32]
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-BE-LABEL: test_ldnp_v33i8:
diff --git a/llvm/test/CodeGen/AArch64/pr-cf624b2.ll b/llvm/test/CodeGen/AArch64/pr-cf624b2.ll
index ea9588e9e3db7..0b0540e559abd 100644
--- a/llvm/test/CodeGen/AArch64/pr-cf624b2.ll
+++ b/llvm/test/CodeGen/AArch64/pr-cf624b2.ll
@@ -14,9 +14,9 @@ define linkonce_odr void @_ZN1y2beEPiRK1vPmPS1_(<8 x i8> %0, ptr %agg.tmp.i) {
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEXT:    str b0, [sp]
 ; CHECK-NEXT:    orr x9, x8, #0xf
 ; CHECK-NEXT:    orr x10, x8, #0xe
-; CHECK-NEXT:    st1 { v0.b }[0], [x8]
 ; CHECK-NEXT:    st1 { v0.b }[15], [x9]
 ; CHECK-NEXT:    orr x9, x8, #0xc
 ; CHECK-NEXT:    st1 { v0.b }[12], [x9]
@@ -46,9 +46,9 @@ define linkonce_odr void @_ZN1y2beEPiRK1vPmPS1_(<8 x i8> %0, ptr %agg.tmp.i) {
 ; CHECK-NEXT:    mov w10, #9 // =0x9
 ; CHECK-NEXT:    st1 { v0.b }[10], [x9]
 ; CHECK-NEXT:    orr x9, x8, x10
+; CHECK-NEXT:    mov w10, #5 // =0x5
+; CHECK-NEXT:    orr x8, x8, x10
 ; CHECK-NEXT:    st1 { v0.b }[9], [x9]
-; CHECK-NEXT:    mov w9, #5 // =0x5
-; CHECK-NEXT:    orr x8, x8, x9
 ; CHECK-NEXT:    st1 { v0.b }[5], [x8]
 ; CHECK-NEXT:    ldr q0, [sp]
 ; CHECK-NEXT:    stp q0, q1, [x0]
diff --git a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
index 4d76994be204f..cbb3b06030bae 100644
--- a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
@@ -201,8 +201,7 @@ define void @v2i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-SD-NEXT:    sqadd v0.2s, v0.2s, v1.2s
 ; CHECK-SD-NEXT:    ushr v0.2s, v0.2s, #24
 ; CHECK-SD-NEXT:    mov w8, v0.s[1]
-; CHECK-SD-NEXT:    fmov w9, s0
-; CHECK-SD-NEXT:    strb w9, [x2]
+; CHECK-SD-NEXT:    str b0, [x2]
 ; CHECK-SD-NEXT:    strb w8, [x2, #1]
 ; CHECK-SD-NEXT:    ret
 ;
@@ -325,7 +324,7 @@ define void @v1i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-SD-NEXT:    ldr b0, [x0]
 ; CHECK-SD-NEXT:    ldr b1, [x1]
 ; CHECK-SD-NEXT:    sqadd v0.8b, v0.8b, v1.8b
-; CHECK-SD-NEXT:    st1 { v0.b }[0], [x2]
+; CHECK-SD-NEXT:    str b0, [x2]
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: v1i8:
diff --git a/llvm/test/CodeGen/AArch64/setcc-type-mismatch.ll b/llvm/test/CodeGen/AArch64/setcc-type-mismatch.ll
index c0a728014e390..950ac92a8b12f 100644
--- a/llvm/test/CodeGen/AArch64/setcc-type-mismatch.ll
+++ b/llvm/test/CodeGen/AArch64/setcc-type-mismatch.ll
@@ -12,8 +12,7 @@ define void @test_mismatched_setcc(<4 x i22> %l, <4 x i22> %r, ptr %addr) {
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI0_0]
 ; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    addv s0, v0.4s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strb w8, [x0]
+; CHECK-NEXT:    str b0, [x0]
 ; CHECK-NEXT:    ret
 
   %tst = icmp eq <4 x i22> %l, %r
diff --git a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
index ae2a16929e254..04b379f455008 100644
--- a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
@@ -202,8 +202,7 @@ define void @v2i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-SD-NEXT:    sqsub v0.2s, v0.2s, v1.2s
 ; CHECK-SD-NEXT:    ushr v0.2s, v0.2s, #24
 ; CHECK-SD-NEXT:    mov w8, v0.s[1]
-; CHECK-SD-NEXT:    fmov w9, s0
-; CHECK-SD-NEXT:    strb w9, [x2]
+; CHECK-SD-NEXT:    str b0, [x2]
 ; CHECK-SD-NEXT:    strb w8, [x2, #1]
 ; CHECK-SD-NEXT:    ret
 ;
@@ -326,7 +325,7 @@ define void @v1i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-SD-NEXT:    ldr b0, [x0]
 ; CHECK-SD-NEXT:    ldr b1, [x1]
 ; CHECK-SD-NEXT:    sqsub v0.8b, v0.8b, v1.8b
-; CHECK-SD-NEXT:    st1 { v0.b }[0], [x2]
+; CHECK-SD-NEXT:    str b0, [x2]
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: v1i8:
diff --git a/llvm/test/CodeGen/AArch64/store.ll b/llvm/test/CodeGen/AArch64/store.ll
index 37a6ad08d4cb3..7ea957d9d165d 100644
--- a/llvm/test/CodeGen/AArch64/store.ll
+++ b/llvm/test/CodeGen/AArch64/store.ll
@@ -111,8 +111,7 @@ define void @store_v2i8(<2 x i8> %a, ptr %ptr){
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    mov w8, v0.s[1]
-; CHECK-SD-NEXT:    fmov w9, s0
-; CHECK-SD-NEXT:    strb w9, [x0]
+; CHECK-SD-NEXT:    str b0, [x0]
 ; CHECK-SD-NEXT:    strb w8, [x0, #1]
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/sub.ll b/llvm/test/CodeGen/AArch64/sub.ll
index 8183a82f21cb5..91a17a89af6e1 100644
--- a/llvm/test/CodeGen/AArch64/sub.ll
+++ b/llvm/test/CodeGen/AArch64/sub.ll
@@ -64,8 +64,7 @@ define void @v2i8(ptr %p1, ptr %p2) {
 ; CHECK-SD-NEXT:    ld1 { v1.b }[4], [x9]
 ; CHECK-SD-NEXT:    sub v0.2s, v0.2s, v1.2s
 ; CHECK-SD-NEXT:    mov w8, v0.s[1]
-; CHECK-SD-NEXT:    fmov w9, s0
-; CHECK-SD-NEXT:    strb w9, [x0]
+; CHECK-SD-NEXT:    str b0, [x0]
 ; CHECK-SD-NEXT:    strb w8, [x0, #1]
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
index aa1adfd306a4c..89a06bc9d5b4e 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
@@ -73,8 +73,7 @@ define void @alloc_v6i8(ptr %st_ptr) nounwind {
 ; CHECK-NEXT:    zip1 z1.s, z1.s, z0.s
 ; CHECK-NEXT:    st1b { z1.h }, p0, [x8]
 ; CHECK-NEXT:    ld1h { z1.s }, p1/z, [x8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strb w8, [x19, #2]
+; CHECK-NEXT:    str b0, [x19, #2]
 ; CHECK-NEXT:    str h1, [x19]
 ; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #32
@@ -119,11 +118,11 @@ define void @alloc_v32i8(ptr %st_ptr) nounwind {
 ; CHECK-NEXT:    mov x0, sp
 ; CHECK-NEXT:    bl def
 ; CHECK-NEXT:    adrp x8, .LCPI2_0
-; CHECK-NEXT:    ldp q0, q2, [sp]
+; CHECK-NEXT:    ldr q0, [sp]
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI2_0]
 ; CHECK-NEXT:    tbl z0.b, { z0.b }, z1.b
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    strb w8, [x19, #8]
+; CHECK-NEXT:    ldr q1, [sp, #16]
+; CHECK-NEXT:    str b1, [x19, #8]
 ; CHECK-NEXT:    str d0, [x19]
 ; CHECK-NEXT:    ldp x30, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #48
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-gather-scatter.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-gather-scatter.ll
index d9f8482a3c503..b1ac9469c0573 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-gather-scatter.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-gather-scatter.ll
@@ -20,9 +20,8 @@ define <2 x i64> @masked_gather_v2i64(ptr %a, ptr %b) vscale_range(2, 2) {
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uaddv d0, p0, z0.d
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    strb w8, [sp, #12]
-; CHECK-NEXT:    and w8, w8, #0xff
+; CHECK-NEXT:    str b0, [sp, #12]
+; CHECK-NEXT:    ldrb w8, [sp, #12]
 ; CHECK-NEXT:    tbz w8, #0, .LBB0_2
 ; CHECK-NEXT:  // %bb.1: // %cond.load
 ; CHECK-NEXT:    fmov x9, d1
@@ -109,11 +108,10 @@ define void @masked_scatter_v2i64(ptr %a, ptr %b) vscale_range(2, 2) {
 ; CHECK-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    mov z2.d, p1/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    and z1.d, z2.d, z1.d
-; CHECK-NEXT:    uaddv d1, p0, z1.d
-; CHECK-NEXT:    fmov x8, d1
+; CHECK-NEXT:    uaddv d2, p0, z1.d
 ; CHECK-NEXT:    ldr q1, [x1]
-; CHECK-NEXT:    strb w8, [sp, #12]
-; CHECK-NEXT:    and w8, w8, #0xff
+; CHECK-NEXT:    str b2, [sp, #12]
+; CHECK-NEXT:    ldrb w8, [sp, #12]
 ; CHECK-NEXT:    tbnz w8, #0, .LBB1_3
 ; CHECK-NEXT:  // %bb.1: // %else
 ; CHECK-NEXT:    tbnz w8, #1, .LBB1_4
diff --git a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
index d0173307bd830..edd96ae4836a4 100644
--- a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
@@ -199,8 +199,7 @@ define void @v2i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-SD-NEXT:    add v0.2s, v0.2s, v1.2s
 ; CHECK-SD-NEXT:    umin v0.2s, v0.2s, v2.2s
 ; CHECK-SD-NEXT:    mov w8, v0.s[1]
-; CHECK-SD-NEXT:    fmov w9, s0
-; CHECK-SD-NEXT:    strb w9, [x2]
+; CHECK-SD-NEXT:    str b0, [x2]
 ; CHECK-SD-NEXT:    strb w8, [x2, #1]
 ; CHECK-SD-NEXT:    ret
 ;
@@ -324,7 +323,7 @@ define void @v1i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-SD-NEXT:    ldr b0, [x0]
 ; CHECK-SD-NEXT:    ldr b1, [x1]
 ; CHECK-SD-NEXT:    uqadd v0.8b, v0.8b, v1.8b
-; CHECK-SD-NEXT:    st1 { v0.b }[0], [x2]
+; CHECK-SD-NEXT:    str b0, [x2]
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: v1i8:
diff --git a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
index dc3ebfb0682ca..63ca1b51c2291 100644
--- a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
@@ -198,8 +198,7 @@ define void @v2i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-SD-NEXT:    mov v1.s[1], w11
 ; CHECK-SD-NEXT:    uqsub v0.2s, v0.2s, v1.2s
 ; CHECK-SD-NEXT:    mov w8, v0.s[1]
-; CHECK-SD-NEXT:    fmov w9, s0
-; CHECK-SD-NEXT:    strb w9, [x2]
+; CHECK-SD-NEXT:    str b0, [x2]
 ; CHECK-SD-NEXT:    strb w8, [x2, #1]
 ; CHECK-SD-NEXT:    ret
 ;
@@ -321,7 +320,7 @@ define void @v1i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-SD-NEXT:    ldr b0, [x0]
 ; CHECK-SD-NEXT:    ldr b1, [x1]
 ; CHECK-SD-NEXT:    uqsub v0.8b, v0.8b, v1.8b
-; CHECK-SD-NEXT:    st1 { v0.b }[0], [x2]
+; CHECK-SD-NEXT:    str b0, [x2]
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: v1i8:
diff --git a/llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll b/llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll
index dd7a9c6d7768b..d9b5a42ba98a6 100644
--- a/llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll
+++ b/llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll
@@ -56,8 +56,7 @@ define void @store_4_elements(<4 x i32> %vec, ptr %out) {
 ; CHECK-NEXT:    ldr q1, [x8, lCPI2_0 at PAGEOFF]
 ; CHECK-NEXT:    bic.16b v0, v1, v0
 ; CHECK-NEXT:    addv.4s s0, v0
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strb w8, [x0]
+; CHECK-NEXT:    str b0, [x0]
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    .loh AdrpLdr Lloh4, Lloh5
 
@@ -99,8 +98,7 @@ define void @add_trunc_compare_before_store(<4 x i32> %vec, ptr %out) {
 ; CHECK-NEXT:    cmlt.4s v0, v0, #0
 ; CHECK-NEXT:    and.16b v0, v0, v1
 ; CHECK-NEXT:    addv.4s s0, v0
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strb w8, [x0]
+; CHECK-NEXT:    str b0, [x0]
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    .loh AdrpLdr Lloh8, Lloh9
 
@@ -141,7 +139,7 @@ define void @store_8_elements_64_bit_vector(<8 x i8> %vec, ptr %out) {
 ; CHECK-NEXT:    ldr d1, [x8, lCPI6_0 at PAGEOFF]
 ; CHECK-NEXT:    bic.8b v0, v1, v0
 ; CHECK-NEXT:    addv.8b b0, v0
-; CHECK-NEXT:    st1.b { v0 }[0], [x0]
+; CHECK-NEXT:    str b0, [x0]
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    .loh AdrpLdr Lloh12, Lloh13
 
@@ -182,8 +180,7 @@ define void @store_2_elements_64_bit_vector(<2 x i32> %vec, ptr %out) {
 ; CHECK-NEXT:    ldr d1, [x8, lCPI8_0 at PAGEOFF]
 ; CHECK-NEXT:    bic.8b v0, v1, v0
 ; CHECK-NEXT:    addp.2s v0, v0, v0
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strb w8, [x0]
+; CHECK-NEXT:    str b0, [x0]
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    .loh AdrpLdr Lloh16, Lloh17
 
diff --git a/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll b/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
index b52cbfe08156b..8ab8f537398ae 100644
--- a/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
+++ b/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
@@ -403,7 +403,7 @@ define void @store_trunc_add_from_64bits(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    add.4h v0, v0, v1
 ; CHECK-NEXT:    st1.b { v0 }[2], [x8]
 ; CHECK-NEXT:    st1.b { v0 }[4], [x9]
-; CHECK-NEXT:    st1.b { v0 }[0], [x1]
+; CHECK-NEXT:    str b0, [x1]
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    .loh AdrpLdr Lloh0, Lloh1
 ;
@@ -592,7 +592,7 @@ define void @shift_trunc_store(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    ushr.4s v0, v0, #16
 ; CHECK-NEXT:    st1.b { v0 }[4], [x8]
 ; CHECK-NEXT:    st1.b { v0 }[8], [x9]
-; CHECK-NEXT:    st1.b { v0 }[0], [x1]
+; CHECK-NEXT:    str b0, [x1]
 ; CHECK-NEXT:    ret
 ;
 ; BE-LABEL: shift_trunc_store:
@@ -626,7 +626,7 @@ define void @shift_trunc_store_default_align(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    ushr.4s v0, v0, #16
 ; CHECK-NEXT:    st1.b { v0 }[4], [x8]
 ; CHECK-NEXT:    st1.b { v0 }[8], [x9]
-; CHECK-NEXT:    st1.b { v0 }[0], [x1]
+; CHECK-NEXT:    str b0, [x1]
 ; CHECK-NEXT:    ret
 ;
 ; BE-LABEL: shift_trunc_store_default_align:
@@ -660,7 +660,7 @@ define void @shift_trunc_store_align_4(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    ushr.4s v0, v0, #16
 ; CHECK-NEXT:    st1.b { v0 }[4], [x8]
 ; CHECK-NEXT:    st1.b { v0 }[8], [x9]
-; CHECK-NEXT:    st1.b { v0 }[0], [x1]
+; CHECK-NEXT:    str b0, [x1]
 ; CHECK-NEXT:    ret
 ;
 ; BE-LABEL: shift_trunc_store_align_4:
@@ -693,9 +693,8 @@ define void @shift_trunc_store_const_offset_1(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    add x9, x1, #3
 ; CHECK-NEXT:    ushr.4s v0, v0, #16
 ; CHECK-NEXT:    st1.b { v0 }[4], [x8]
-; CHECK-NEXT:    add x8, x1, #1
 ; CHECK-NEXT:    st1.b { v0 }[8], [x9]
-; CHECK-NEXT:    st1.b { v0 }[0], [x8]
+; CHECK-NEXT:    str b0, [x1, #1]
 ; CHECK-NEXT:    ret
 ;
 ; BE-LABEL: shift_trunc_store_const_offset_1:
@@ -729,9 +728,8 @@ define void @shift_trunc_store_const_offset_3(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    add x9, x1, #5
 ; CHECK-NEXT:    ushr.4s v0, v0, #16
 ; CHECK-NEXT:    st1.b { v0 }[4], [x8]
-; CHECK-NEXT:    add x8, x1, #3
 ; CHECK-NEXT:    st1.b { v0 }[8], [x9]
-; CHECK-NEXT:    st1.b { v0 }[0], [x8]
+; CHECK-NEXT:    str b0, [x1, #3]
 ; CHECK-NEXT:    ret
 ;
 ; BE-LABEL: shift_trunc_store_const_offset_3:
@@ -807,12 +805,12 @@ define void @load_v3i8_zext_to_3xi32_add_trunc_store(ptr %src) {
 ; CHECK-NEXT:    add x8, x0, #2
 ; CHECK-NEXT:    orr w9, w10, w9, lsl #16
 ; CHECK-NEXT:    fmov s0, w9
+; CHECK-NEXT:    add x9, x0, #1
 ; CHECK-NEXT:    zip1.8b v0, v0, v0
 ; CHECK-NEXT:    uaddw.4s v0, v1, v0
 ; CHECK-NEXT:    st1.b { v0 }[8], [x8]
-; CHECK-NEXT:    add x8, x0, #1
-; CHECK-NEXT:    st1.b { v0 }[0], [x0]
-; CHECK-NEXT:    st1.b { v0 }[4], [x8]
+; CHECK-NEXT:    st1.b { v0 }[4], [x9]
+; CHECK-NEXT:    str b0, [x0]
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    .loh AdrpLdr Lloh4, Lloh5
 ;
@@ -860,12 +858,12 @@ define void @load_v3i8_sext_to_3xi32_add_trunc_store(ptr %src) {
 ; CHECK-NEXT:    add x8, x0, #2
 ; CHECK-NEXT:    orr w9, w10, w9, lsl #16
 ; CHECK-NEXT:    fmov s0, w9
+; CHECK-NEXT:    add x9, x0, #1
 ; CHECK-NEXT:    zip1.8b v0, v0, v0
 ; CHECK-NEXT:    uaddw.4s v0, v1, v0
 ; CHECK-NEXT:    st1.b { v0 }[8], [x8]
-; CHECK-NEXT:    add x8, x0, #1
-; CHECK-NEXT:    st1.b { v0 }[0], [x0]
-; CHECK-NEXT:    st1.b { v0 }[4], [x8]
+; CHECK-NEXT:    st1.b { v0 }[4], [x9]
+; CHECK-NEXT:    str b0, [x0]
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    .loh AdrpLdr Lloh6, Lloh7
 ;
diff --git a/llvm/test/CodeGen/AArch64/vector-compress.ll b/llvm/test/CodeGen/AArch64/vector-compress.ll
index 710ea70d678c5..f990bdc2e5615 100644
--- a/llvm/test/CodeGen/AArch64/vector-compress.ll
+++ b/llvm/test/CodeGen/AArch64/vector-compress.ll
@@ -109,7 +109,7 @@ define <16 x i8> @test_compress_v16i8(<16 x i8> %vec, <16 x i1> %mask) {
 ; CHECK-NEXT:    shl.16b v1, v1, #7
 ; CHECK-NEXT:    mov x12, sp
 ; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1.b { v0 }[0], [x8]
+; CHECK-NEXT:    str b0, [sp]
 ; CHECK-NEXT:    mov x13, sp
 ; CHECK-NEXT:    cmlt.16b v1, v1, #0
 ; CHECK-NEXT:    umov.b w9, v1[0]



More information about the llvm-commits mailing list