[llvm] fbd6d54 - [AArch64] Fold NEON splats into users by using SVE immediates (#165559)

via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 13 03:15:03 PDT 2026


Author: Benjamin Maxwell
Date: 2026-03-13T10:14:58Z
New Revision: fbd6d5409bc54e3ed272dd5c6a10e976bcac8047

URL: https://github.com/llvm/llvm-project/commit/fbd6d5409bc54e3ed272dd5c6a10e976bcac8047
DIFF: https://github.com/llvm/llvm-project/commit/fbd6d5409bc54e3ed272dd5c6a10e976bcac8047.diff

LOG: [AArch64] Fold NEON splats into users by using SVE immediates (#165559)

This patch adds patterns that attempt to fold NEON constant splats into
their users by promoting those users to SVE when the splat immediate is
a legal SVE immediate operand.

This is done as ISel patterns to avoid folding to SVE too early, which
can disrupt other patterns/combines.
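
For example, in the new imm-splat-ops.ll test added by this patch, the
splat in

    %and = and <4 x i32> %a, splat (i32 1065353216)

must be materialized into a register when only NEON is available:

    fmov v1.4s, #1.00000000
    and  v0.16b, v0.16b, v1.16b

whereas with +sve the constant becomes an immediate operand of the
instruction itself:

    and  z0.s, z0.s, #0x3f800000

Splats that are not legal SVE immediates (for example, an add of 513
per lane) keep their existing NEON lowering.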

Added: 
    llvm/test/CodeGen/AArch64/imm-splat-ops.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/test/CodeGen/AArch64/aarch64-smull.ll
    llvm/test/CodeGen/AArch64/extract-vector-cmp.ll
    llvm/test/CodeGen/AArch64/partial-reduction-add-predicated.ll
    llvm/test/CodeGen/AArch64/reassocmls.ll
    llvm/test/CodeGen/AArch64/zext-to-tbl.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 092b6bf00396d..1b706411791e9 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -500,11 +500,19 @@ class AArch64DAGToDAGISel : public SelectionDAGISel {
 
   bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift,
                           bool Negate);
+  bool SelectSVEAddSubImm(SDLoc DL, APInt Value, MVT VT, SDValue &Imm,
+                          SDValue &Shift, bool Negate);
   bool SelectSVEAddSubSSatImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift,
                               bool Negate);
   bool SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
   bool SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm, bool Invert);
 
+  // Match a NEON splat (fmov, movi, etc.) of a legal SVE immediate.
+  bool SelectNEONSplatOfSVELogicalImm(SDValue N, SDValue &Imm);
+  bool SelectNEONSplatOfSVEAddSubImm(SDValue N, SDValue &Imm, SDValue &Shift);
+  bool SelectNEONSplatOfSVEArithSImm(SDValue N, SDValue &Imm);
+
+  bool SelectSVESignedArithImm(SDLoc DL, APInt Value, SDValue &Imm);
   bool SelectSVESignedArithImm(SDValue N, SDValue &Imm);
   bool SelectSVEShiftImm(SDValue N, uint64_t Low, uint64_t High,
                          bool AllowSaturation, SDValue &Imm);
@@ -597,6 +605,72 @@ static bool isIntImmediateEq(SDValue N, const uint64_t ImmExpected) {
 }
 #endif
 
+static APInt DecodeFMOVImm(uint64_t Imm, unsigned RegWidth) {
+  assert(RegWidth == 32 || RegWidth == 64);
+  if (RegWidth == 32)
+    return APInt(RegWidth,
+                 uint32_t(AArch64_AM::decodeAdvSIMDModImmType11(Imm)));
+  return APInt(RegWidth, AArch64_AM::decodeAdvSIMDModImmType12(Imm));
+}
+
+// Decodes the integer splat value from a NEON splat operation.
+static std::optional<APInt> DecodeNEONSplat(SDValue N) {
+  assert(N.getValueType().isInteger() && "Only integers are supported");
+  unsigned SplatWidth = N.getScalarValueSizeInBits();
+  if (N->getOpcode() == AArch64ISD::NVCAST) {
+    SDValue Op = N->getOperand(0);
+    if (Op.getOpcode() != AArch64ISD::FMOV ||
+        Op.getScalarValueSizeInBits() != N.getScalarValueSizeInBits())
+      return std::nullopt;
+    return DecodeFMOVImm(Op.getConstantOperandVal(0), SplatWidth);
+  }
+  if (N->getOpcode() == AArch64ISD::MOVI)
+    return APInt(SplatWidth, N.getConstantOperandVal(0));
+  if (N->getOpcode() == AArch64ISD::MOVIshift)
+    return APInt(SplatWidth, N.getConstantOperandVal(0)
+                                 << N.getConstantOperandVal(1));
+  if (N->getOpcode() == AArch64ISD::MVNIshift)
+    return ~APInt(SplatWidth, N.getConstantOperandVal(0)
+                                  << N.getConstantOperandVal(1));
+  if (N->getOpcode() == AArch64ISD::DUP)
+    if (auto *Const = dyn_cast<ConstantSDNode>(N->getOperand(0)))
+      return Const->getAPIntValue().trunc(SplatWidth);
+  // TODO: Recognize more splat-like NEON operations. See ConstantBuildVector
+  // in AArch64ISelLowering. AArch64ISD::MOVIedit support will allow more folds.
+  return std::nullopt;
+}
+
+bool AArch64DAGToDAGISel::SelectNEONSplatOfSVELogicalImm(SDValue N,
+                                                         SDValue &Imm) {
+  std::optional<APInt> ImmVal = DecodeNEONSplat(N);
+  if (!ImmVal)
+    return false;
+  uint64_t Encoding;
+  if (!AArch64_AM::isSVELogicalImm(N.getScalarValueSizeInBits(),
+                                   ImmVal->getZExtValue(), Encoding))
+    return false;
+
+  Imm = CurDAG->getTargetConstant(Encoding, SDLoc(N), MVT::i64);
+  return true;
+}
+
+bool AArch64DAGToDAGISel::SelectNEONSplatOfSVEAddSubImm(SDValue N, SDValue &Imm,
+                                                        SDValue &Shift) {
+  if (std::optional<APInt> ImmVal = DecodeNEONSplat(N))
+    return SelectSVEAddSubImm(SDLoc(N), *ImmVal,
+                              N.getValueType().getScalarType().getSimpleVT(),
+                              Imm, Shift,
+                              /*Negate=*/false);
+  return false;
+}
+
+bool AArch64DAGToDAGISel::SelectNEONSplatOfSVEArithSImm(SDValue N,
+                                                        SDValue &Imm) {
+  if (std::optional<APInt> ImmVal = DecodeNEONSplat(N))
+    return SelectSVESignedArithImm(SDLoc(N), *ImmVal, Imm);
+  return false;
+}
+
 bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
     const SDValue &Op, const InlineAsm::ConstraintCode ConstraintID,
     std::vector<SDValue> &OutOps) {
@@ -4093,14 +4167,7 @@ bool AArch64DAGToDAGISel::SelectCVTFixedPointVec(SDValue N, SDValue &FixedPos,
                                           << N.getConstantOperandVal(1)));
     break;
   case AArch64ISD::FMOV:
-    assert(RegWidth == 32 || RegWidth == 64);
-    if (RegWidth == 32)
-      FVal = ImmToFloat(
-          APInt(RegWidth, (uint32_t)AArch64_AM::decodeAdvSIMDModImmType11(
-                              N.getConstantOperandVal(0))));
-    else
-      FVal = ImmToFloat(APInt(RegWidth, AArch64_AM::decodeAdvSIMDModImmType12(
-                                            N.getConstantOperandVal(0))));
+    FVal = ImmToFloat(DecodeFMOVImm(N.getConstantOperandVal(0), RegWidth));
     break;
   case AArch64ISD::DUP:
     if (isa<ConstantSDNode>(N.getOperand(0)))
@@ -4344,10 +4411,15 @@ bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm,
   if (!isa<ConstantSDNode>(N))
     return false;
 
-  SDLoc DL(N);
   APInt Val =
       cast<ConstantSDNode>(N)->getAPIntValue().trunc(VT.getFixedSizeInBits());
 
+  return SelectSVEAddSubImm(SDLoc(N), Val, VT, Imm, Shift, Negate);
+}
+
+bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDLoc DL, APInt Val, MVT VT,
+                                             SDValue &Imm, SDValue &Shift,
+                                             bool Negate) {
   if (Negate)
     Val = -Val;
 
@@ -4451,13 +4523,17 @@ bool AArch64DAGToDAGISel::SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm,
 }
 
 bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDValue N, SDValue &Imm) {
-  if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
-    int64_t ImmVal = CNode->getSExtValue();
-    SDLoc DL(N);
-    if (ImmVal >= -128 && ImmVal < 128) {
-      Imm = CurDAG->getSignedTargetConstant(ImmVal, DL, MVT::i32);
-      return true;
-    }
+  if (auto CNode = dyn_cast<ConstantSDNode>(N))
+    return SelectSVESignedArithImm(SDLoc(N), CNode->getAPIntValue(), Imm);
+  return false;
+}
+
+bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDLoc DL, APInt Val,
+                                                  SDValue &Imm) {
+  int64_t ImmVal = Val.getSExtValue();
+  if (ImmVal >= -128 && ImmVal < 128) {
+    Imm = CurDAG->getSignedTargetConstant(ImmVal, DL, MVT::i32);
+    return true;
   }
   return false;
 }

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 4e67d067f4e10..081728bc44360 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -178,6 +178,10 @@ def sve_cnth_imm_neg : ComplexPattern<i64, 1, "SelectRDVLImm<1, 16, -8>">;
 def sve_cntw_imm_neg : ComplexPattern<i64, 1, "SelectRDVLImm<1, 16, -4>">;
 def sve_cntd_imm_neg : ComplexPattern<i64, 1, "SelectRDVLImm<1, 16, -2>">;
 
+def NEONSplatOfSVELogicalImmPat : ComplexPattern<vAny, 1, "SelectNEONSplatOfSVELogicalImm", []>;
+def NEONSplatOfSVEAddSubImm : ComplexPattern<vAny, 2, "SelectNEONSplatOfSVEAddSubImm", []>;
+def NEONSplatOfSVEArithSImm : ComplexPattern<vAny, 1, "SelectNEONSplatOfSVEArithSImm", []>;
+
 def SDT_AArch64Reduce : SDTypeProfile<1, 2, [SDTCisVec<1>, SDTCisVec<2>]>;
 def SDT_AArch64ReduceWithInit : SDTypeProfile<1, 3,
    [SDTCisVec<1>, SDTCVecEltisVT<1,i1>, SDTCisVec<3>, SDTCisSameNumEltsAs<1,3>]>;
@@ -4517,6 +4521,46 @@ def : InstAlias<"pfalse\t$Pd", (PFALSE PPRorPNR8:$Pd), 0>;
 
 }
 
+// Promote NEON operations to SVE when an SVE immediate form can be used.
+class PromoteNEONToSVEImm
+  <SDNode Opc, ValueType VT, ComplexPattern ImmPat, Instruction SVEImmInst, ValueType ImmType>
+  : Pat<(Opc (VT V128:$Vm), (VT (ImmPat ImmType:$imm))),
+      (EXTRACT_SUBREG (SVEImmInst
+        (INSERT_SUBREG (IMPLICIT_DEF), (VT V128:$Vm), zsub), ImmType:$imm), zsub)>;
+// Same as PromoteNEONToSVEImm but accepts immediates with a shift (for ADD/SUB).
+class PromoteNEONToSVEImmWithShift
+  <SDNode Opc, ValueType VT, ComplexPattern ImmPat, Instruction SVEImmInst>
+  : Pat<(Opc (VT V128:$Vm), (VT (ImmPat i32:$imm, i32:$shift))),
+      (EXTRACT_SUBREG (SVEImmInst
+        (INSERT_SUBREG (IMPLICIT_DEF), (VT V128:$Vm), zsub), i32:$imm, i32:$shift), zsub)>;
+
+let Predicates = [HasSVE_or_SME] in {
+  foreach VT = [v2i64, v4i32, v8i16, v16i8] in {
+    // Logical operations
+    def : PromoteNEONToSVEImm<and, VT, NEONSplatOfSVELogicalImmPat, AND_ZI, i64>;
+    def : PromoteNEONToSVEImm<xor, VT, NEONSplatOfSVELogicalImmPat, EOR_ZI, i64>;
+    def : PromoteNEONToSVEImm< or, VT, NEONSplatOfSVELogicalImmPat, ORR_ZI, i64>;
+  }
+
+  // Arith operations: ADD
+  def : PromoteNEONToSVEImmWithShift<add, v2i64, NEONSplatOfSVEAddSubImm, ADD_ZI_D>;
+  def : PromoteNEONToSVEImmWithShift<add, v4i32, NEONSplatOfSVEAddSubImm, ADD_ZI_S>;
+  def : PromoteNEONToSVEImmWithShift<add, v8i16, NEONSplatOfSVEAddSubImm, ADD_ZI_H>;
+  def : PromoteNEONToSVEImmWithShift<add, v16i8, NEONSplatOfSVEAddSubImm, ADD_ZI_B>;
+
+  // Arith operations: SUB
+  def : PromoteNEONToSVEImmWithShift<sub, v2i64, NEONSplatOfSVEAddSubImm, SUB_ZI_D>;
+  def : PromoteNEONToSVEImmWithShift<sub, v4i32, NEONSplatOfSVEAddSubImm, SUB_ZI_S>;
+  def : PromoteNEONToSVEImmWithShift<sub, v8i16, NEONSplatOfSVEAddSubImm, SUB_ZI_H>;
+  def : PromoteNEONToSVEImmWithShift<sub, v16i8, NEONSplatOfSVEAddSubImm, SUB_ZI_B>;
+
+  // Arith operations: MUL
+  def : PromoteNEONToSVEImm<mul, v2i64, NEONSplatOfSVEArithSImm, MUL_ZI_D, i32>;
+  def : PromoteNEONToSVEImm<mul, v4i32, NEONSplatOfSVEArithSImm, MUL_ZI_S, i32>;
+  def : PromoteNEONToSVEImm<mul, v8i16, NEONSplatOfSVEArithSImm, MUL_ZI_H, i32>;
+  def : PromoteNEONToSVEImm<mul, v16i8, NEONSplatOfSVEArithSImm, MUL_ZI_B, i32>;
+}
+
 //===----------------------------------------------------------------------===//
 // Non-widening BFloat16 to BFloat16 instructions
 //===----------------------------------------------------------------------===//

diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
index 80fe2c898e8e5..eb5180e9b86e0 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
@@ -1573,13 +1573,29 @@ entry:
 }
 
 define <8 x i16> @umull_and256_v8i16(<8 x i8> %src1, <8 x i16> %src2) {
-; CHECK-LABEL: umull_and256_v8i16:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.8h, #1, lsl #8
-; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    mul v0.8h, v0.8h, v1.8h
-; CHECK-NEXT:    ret
+; CHECK-NEON-LABEL: umull_and256_v8i16:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    movi v2.8h, #1, lsl #8
+; CHECK-NEON-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEON-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEON-NEXT:    mul v0.8h, v0.8h, v1.8h
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: umull_and256_v8i16:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-SVE-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-SVE-NEXT:    and z1.h, z1.h, #0x100
+; CHECK-SVE-NEXT:    mul v0.8h, v0.8h, v1.8h
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-GI-LABEL: umull_and256_v8i16:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    movi v2.8h, #1, lsl #8
+; CHECK-GI-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-GI-NEXT:    mul v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT:    ret
 entry:
   %in1 = zext <8 x i8> %src1 to <8 x i16>
   %in2 = and <8 x i16> %src2, <i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256>

diff --git a/llvm/test/CodeGen/AArch64/extract-vector-cmp.ll b/llvm/test/CodeGen/AArch64/extract-vector-cmp.ll
index f5cf629b2a4a4..099594d5ca8aa 100644
--- a/llvm/test/CodeGen/AArch64/extract-vector-cmp.ll
+++ b/llvm/test/CodeGen/AArch64/extract-vector-cmp.ll
@@ -75,14 +75,13 @@ define void @vector_loop_with_icmp(ptr nocapture noundef writeonly %dest) {
 ; CHECK-LABEL: vector_loop_with_icmp:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    mov z1.d, #2 // =0x2
 ; CHECK-NEXT:    add x8, x0, #4
 ; CHECK-NEXT:    mov w9, #16 // =0x10
 ; CHECK-NEXT:    mov w10, #1 // =0x1
 ; CHECK-NEXT:    b .LBB5_2
 ; CHECK-NEXT:  .LBB5_1: // %pred.store.continue6
 ; CHECK-NEXT:    // in Loop: Header=BB5_2 Depth=1
-; CHECK-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    add z0.d, z0.d, #2 // =0x2
 ; CHECK-NEXT:    subs x9, x9, #2
 ; CHECK-NEXT:    add x8, x8, #8
 ; CHECK-NEXT:    b.eq .LBB5_6

diff --git a/llvm/test/CodeGen/AArch64/imm-splat-ops.ll b/llvm/test/CodeGen/AArch64/imm-splat-ops.ll
new file mode 100644
index 0000000000000..90aeecd82a0e2
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/imm-splat-ops.ll
@@ -0,0 +1,320 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s -o - | FileCheck %s --check-prefix=CHECK-NEON
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+sve < %s -o - | FileCheck %s --check-prefix=CHECK-SVE
+
+define <4 x i32> @and(<4 x i32> %a) {
+; CHECK-NEON-LABEL: and:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    fmov v1.4s, #1.00000000
+; CHECK-NEON-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: and:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    and z0.s, z0.s, #0x3f800000
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %and = and <4 x i32> %a, splat (i32 1065353216)
+  ret <4 x i32> %and
+}
+
+define <4 x i32> @xor(<4 x i32> %a) {
+; CHECK-NEON-LABEL: xor:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    movi v1.4s, #127
+; CHECK-NEON-NEXT:    eor v0.16b, v0.16b, v1.16b
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: xor:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    eor z0.s, z0.s, #0x7f
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %and = xor <4 x i32> %a, splat (i32 127)
+  ret <4 x i32> %and
+}
+
+define <4 x i32> @or(<4 x i32> %a) {
+; CHECK-NEON-LABEL: or:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    mvni v1.4s, #127
+; CHECK-NEON-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: or:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    orr z0.s, z0.s, #0xffffff80
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %or = or <4 x i32> %a, splat (i32 -128)
+  ret <4 x i32> %or
+}
+
+define <2 x i64> @add_v2i64(<2 x i64> %a) {
+; CHECK-NEON-LABEL: add_v2i64:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    mov w8, #33 // =0x21
+; CHECK-NEON-NEXT:    dup v1.2d, x8
+; CHECK-NEON-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: add_v2i64:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    add z0.d, z0.d, #33 // =0x21
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %add = add <2 x i64> %a, splat (i64 33)
+  ret <2 x i64> %add
+}
+
+define <4 x i32> @add_v4i32(<4 x i32> %a) {
+; CHECK-NEON-LABEL: add_v4i32:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    movi v1.4s, #1
+; CHECK-NEON-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: add_v4i32:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    add z0.s, z0.s, #1 // =0x1
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %add = add <4 x i32> %a, splat (i32 1)
+  ret <4 x i32> %add
+}
+
+define <8 x i16> @add_v8i16(<8 x i16> %a) {
+; CHECK-NEON-LABEL: add_v8i16:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    movi v1.8h, #16
+; CHECK-NEON-NEXT:    add v0.8h, v0.8h, v1.8h
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: add_v8i16:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    add z0.h, z0.h, #16 // =0x10
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %add = add <8 x i16> %a, splat (i16 16)
+  ret <8 x i16> %add
+}
+
+define <16 x i8> @add_v16i8(<16 x i8> %a) {
+; CHECK-NEON-LABEL: add_v16i8:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    movi v1.16b, #2
+; CHECK-NEON-NEXT:    add v0.16b, v0.16b, v1.16b
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: add_v16i8:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    add z0.b, z0.b, #2 // =0x2
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %add = add <16 x i8> %a, splat (i8 2)
+  ret <16 x i8> %add
+}
+
+define <4 x i32> @add_not_multiple_of_256(<4 x i32> %a) {
+; CHECK-NEON-LABEL: add_not_multiple_of_256:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    mov w8, #513 // =0x201
+; CHECK-NEON-NEXT:    dup v1.4s, w8
+; CHECK-NEON-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: add_not_multiple_of_256:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    mov w8, #513 // =0x201
+; CHECK-SVE-NEXT:    dup v1.4s, w8
+; CHECK-SVE-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-SVE-NEXT:    ret
+entry:
+  %add = add <4 x i32> %a, splat (i32 513)
+  ret <4 x i32> %add
+}
+
+define <2 x i64> @sub_v2i64(<2 x i64> %a) {
+; CHECK-NEON-LABEL: sub_v2i64:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    mov w8, #64 // =0x40
+; CHECK-NEON-NEXT:    dup v1.2d, x8
+; CHECK-NEON-NEXT:    sub v0.2d, v0.2d, v1.2d
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: sub_v2i64:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    sub z0.d, z0.d, #64 // =0x40
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %sub = sub <2 x i64> %a, splat (i64 64)
+  ret <2 x i64> %sub
+}
+
+define <4 x i32> @sub_v4i32(<4 x i32> %a) {
+; CHECK-NEON-LABEL: sub_v4i32:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    movi v1.4s, #2, lsl #8
+; CHECK-NEON-NEXT:    sub v0.4s, v0.4s, v1.4s
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: sub_v4i32:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    sub z0.s, z0.s, #512 // =0x200
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %sub = sub <4 x i32> %a, splat (i32 512)
+  ret <4 x i32> %sub
+}
+
+define <8 x i16> @sub_v8i16(<8 x i16> %a) {
+; CHECK-NEON-LABEL: sub_v8i16:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    movi v1.8h, #99
+; CHECK-NEON-NEXT:    sub v0.8h, v0.8h, v1.8h
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: sub_v8i16:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    sub z0.h, z0.h, #99 // =0x63
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %sub = sub <8 x i16> %a, splat (i16 99)
+  ret <8 x i16> %sub
+}
+
+define <16 x i8> @sub_v16i8(<16 x i8> %a) {
+; CHECK-NEON-LABEL: sub_v16i8:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    movi v1.16b, #7
+; CHECK-NEON-NEXT:    sub v0.16b, v0.16b, v1.16b
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: sub_v16i8:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    sub z0.b, z0.b, #7 // =0x7
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %sub = sub <16 x i8> %a, splat (i8 7)
+  ret <16 x i8> %sub
+}
+
+define <2 x i64> @mul_v2i64(<2 x i64> %a) {
+; CHECK-NEON-LABEL: mul_v2i64:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    fmov x10, d0
+; CHECK-NEON-NEXT:    mov w8, #123 // =0x7b
+; CHECK-NEON-NEXT:    mov x9, v0.d[1]
+; CHECK-NEON-NEXT:    mul x10, x10, x8
+; CHECK-NEON-NEXT:    mul x8, x9, x8
+; CHECK-NEON-NEXT:    fmov d0, x10
+; CHECK-NEON-NEXT:    mov v0.d[1], x8
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: mul_v2i64:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    mov z1.d, #123 // =0x7b
+; CHECK-SVE-NEXT:    ptrue p0.d, vl2
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %mul = mul <2 x i64> %a, splat (i64 123)
+  ret <2 x i64> %mul
+}
+
+define <4 x i32> @mul_v4i32(<4 x i32> %a) {
+; CHECK-NEON-LABEL: mul_v4i32:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    movi v1.4s, #33
+; CHECK-NEON-NEXT:    mul v0.4s, v0.4s, v1.4s
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: mul_v4i32:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    mul z0.s, z0.s, #33
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %mul = mul <4 x i32> %a, splat (i32 33)
+  ret <4 x i32> %mul
+}
+
+define <8 x i16> @mul_v8i16(<8 x i16> %a) {
+; CHECK-NEON-LABEL: mul_v8i16:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    mvni v1.8h, #32
+; CHECK-NEON-NEXT:    mul v0.8h, v0.8h, v1.8h
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: mul_v8i16:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    mul z0.h, z0.h, #-33
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %mul = mul <8 x i16> %a, splat (i16 -33)
+  ret <8 x i16> %mul
+}
+
+define <16 x i8> @mul_v16i8(<16 x i8> %a) {
+; CHECK-NEON-LABEL: mul_v16i8:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    movi v1.16b, #253
+; CHECK-NEON-NEXT:    mul v0.16b, v0.16b, v1.16b
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: mul_v16i8:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-SVE-NEXT:    mul z0.b, z0.b, #-3
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+entry:
+  %mul = mul <16 x i8> %a, splat (i8 -3)
+  ret <16 x i8> %mul
+}
+
+define <4 x i32> @mul_imm_too_big(<4 x i32> %a) {
+; CHECK-NEON-LABEL: mul_imm_too_big:
+; CHECK-NEON:       // %bb.0: // %entry
+; CHECK-NEON-NEXT:    mov w8, #320 // =0x140
+; CHECK-NEON-NEXT:    dup v1.4s, w8
+; CHECK-NEON-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEON-NEXT:    ret
+;
+; CHECK-SVE-LABEL: mul_imm_too_big:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    mov w8, #320 // =0x140
+; CHECK-SVE-NEXT:    dup v1.4s, w8
+; CHECK-SVE-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-SVE-NEXT:    ret
+entry:
+  %mul = add <4 x i32> %a, splat (i32 320)
+  ret <4 x i32> %mul
+}

diff --git a/llvm/test/CodeGen/AArch64/partial-reduction-add-predicated.ll b/llvm/test/CodeGen/AArch64/partial-reduction-add-predicated.ll
index 24cdd0a852222..e616467480670 100644
--- a/llvm/test/CodeGen/AArch64/partial-reduction-add-predicated.ll
+++ b/llvm/test/CodeGen/AArch64/partial-reduction-add-predicated.ll
@@ -23,9 +23,8 @@ define <4 x i32> @predicate_dot_by_C_fixed_length(<4 x i32> %acc, <16 x i1> %p,
 ; CHECK-LABEL: predicate_dot_by_C_fixed_length:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shl v1.16b, v1.16b, #7
-; CHECK-NEXT:    movi v3.16b, #127
 ; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    and z1.b, z1.b, #0x7f
 ; CHECK-NEXT:    sdot v0.4s, v2.16b, v1.16b
 ; CHECK-NEXT:    ret
  %ext.1 = sext <16 x i8> %a to <16 x i32>
@@ -66,8 +65,8 @@ define <vscale x 4 x i32> @predicate_dot_by_C_scalable(<vscale x 4 x i32> %acc,
 define <4 x i32> @predicate_ext_mul_fixed_length(<4 x i32> %acc, <16 x i1> %p, <16 x i8> %a) #0 {
 ; CHECK-LABEL: predicate_ext_mul_fixed_length:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi v3.16b, #1
-; CHECK-NEXT:    and v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    and z1.b, z1.b, #0x1
 ; CHECK-NEXT:    sdot v0.4s, v2.16b, v1.16b
 ; CHECK-NEXT:    ret
  %ext = sext <16 x i8> %a to <16 x i32>

diff --git a/llvm/test/CodeGen/AArch64/reassocmls.ll b/llvm/test/CodeGen/AArch64/reassocmls.ll
index 5a769a577ce92..c199f832c57ac 100644
--- a/llvm/test/CodeGen/AArch64/reassocmls.ll
+++ b/llvm/test/CodeGen/AArch64/reassocmls.ll
@@ -266,10 +266,10 @@ define <8 x i16> @mls_v8i16_C(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16
 define <8 x i16> @mla_v8i16_C(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d, <8 x i16> %e) {
 ; CHECK-SD-LABEL: mla_v8i16_C:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    mul v1.8h, v2.8h, v1.8h
-; CHECK-SD-NEXT:    movi v0.8h, #10
-; CHECK-SD-NEXT:    mla v1.8h, v4.8h, v3.8h
-; CHECK-SD-NEXT:    add v0.8h, v1.8h, v0.8h
+; CHECK-SD-NEXT:    mul v0.8h, v2.8h, v1.8h
+; CHECK-SD-NEXT:    mla v0.8h, v4.8h, v3.8h
+; CHECK-SD-NEXT:    add z0.h, z0.h, #10 // =0xa
+; CHECK-SD-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: mla_v8i16_C:

diff --git a/llvm/test/CodeGen/AArch64/zext-to-tbl.ll b/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
index 74a717f1635a3..a4caf0e8068d4 100644
--- a/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
@@ -1238,7 +1238,6 @@ exit:
 define void @zext_v16i4_to_v16i32_in_loop(ptr %src, ptr %dst) {
 ; CHECK-LABEL: zext_v16i4_to_v16i32_in_loop:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    movi.4s v0, #15
 ; CHECK-NEXT:    mov x8, xzr
 ; CHECK-NEXT:  LBB13_1: ; %loop
 ; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -1251,49 +1250,48 @@ define void @zext_v16i4_to_v16i32_in_loop(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    ubfx w15, w9, #16, #4
 ; CHECK-NEXT:    lsr x11, x9, #36
 ; CHECK-NEXT:    lsr w14, w9, #20
-; CHECK-NEXT:    fmov s1, w12
-; CHECK-NEXT:    fmov s2, w13
+; CHECK-NEXT:    fmov s0, w12
+; CHECK-NEXT:    fmov s1, w13
 ; CHECK-NEXT:    lsr w12, w9, #4
-; CHECK-NEXT:    fmov s3, w15
-; CHECK-NEXT:    mov.h v1[1], w10
+; CHECK-NEXT:    fmov s2, w15
+; CHECK-NEXT:    mov.h v0[1], w10
 ; CHECK-NEXT:    and w10, w9, #0xf
-; CHECK-NEXT:    mov.h v2[1], w11
-; CHECK-NEXT:    fmov s4, w10
+; CHECK-NEXT:    mov.h v1[1], w11
+; CHECK-NEXT:    fmov s3, w10
 ; CHECK-NEXT:    lsr x11, x9, #56
-; CHECK-NEXT:    mov.h v3[1], w14
+; CHECK-NEXT:    mov.h v2[1], w14
 ; CHECK-NEXT:    lsr x10, x9, #40
-; CHECK-NEXT:    mov.h v4[1], w12
+; CHECK-NEXT:    mov.h v3[1], w12
 ; CHECK-NEXT:    lsr w12, w9, #24
-; CHECK-NEXT:    mov.h v1[2], w11
+; CHECK-NEXT:    mov.h v0[2], w11
 ; CHECK-NEXT:    lsr w11, w9, #8
-; CHECK-NEXT:    mov.h v2[2], w10
+; CHECK-NEXT:    mov.h v1[2], w10
 ; CHECK-NEXT:    lsr x10, x9, #60
-; CHECK-NEXT:    mov.h v3[2], w12
+; CHECK-NEXT:    mov.h v2[2], w12
 ; CHECK-NEXT:    lsr x12, x9, #44
-; CHECK-NEXT:    mov.h v4[2], w11
+; CHECK-NEXT:    mov.h v3[2], w11
 ; CHECK-NEXT:    lsr w11, w9, #28
 ; CHECK-NEXT:    lsr w9, w9, #12
-; CHECK-NEXT:    mov.h v1[3], w10
-; CHECK-NEXT:    mov.h v2[3], w12
-; CHECK-NEXT:    mov.h v3[3], w11
-; CHECK-NEXT:    mov.h v4[3], w9
+; CHECK-NEXT:    mov.h v0[3], w10
+; CHECK-NEXT:    mov.h v1[3], w12
+; CHECK-NEXT:    mov.h v2[3], w11
+; CHECK-NEXT:    mov.h v3[3], w9
+; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    ushll.4s v1, v1, #0
 ; CHECK-NEXT:    ushll.4s v2, v2, #0
 ; CHECK-NEXT:    ushll.4s v3, v3, #0
-; CHECK-NEXT:    ushll.4s v4, v4, #0
-; CHECK-NEXT:    and.16b v1, v1, v0
-; CHECK-NEXT:    and.16b v2, v2, v0
-; CHECK-NEXT:    and.16b v3, v3, v0
-; CHECK-NEXT:    and.16b v4, v4, v0
-; CHECK-NEXT:    stp q2, q1, [x1, #32]
-; CHECK-NEXT:    stp q4, q3, [x1], #64
+; CHECK-NEXT:    and z0.s, z0.s, #0xf
+; CHECK-NEXT:    and z1.s, z1.s, #0xf
+; CHECK-NEXT:    and z2.s, z2.s, #0xf
+; CHECK-NEXT:    and z3.s, z3.s, #0xf
+; CHECK-NEXT:    stp q1, q0, [x1, #32]
+; CHECK-NEXT:    stp q3, q2, [x1], #64
 ; CHECK-NEXT:    b.ne LBB13_1
 ; CHECK-NEXT:  ; %bb.2: ; %exit
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-BE-LABEL: zext_v16i4_to_v16i32_in_loop:
 ; CHECK-BE:       // %bb.0: // %entry
-; CHECK-BE-NEXT:    movi v0.4s, #15
 ; CHECK-BE-NEXT:    mov x8, xzr
 ; CHECK-BE-NEXT:  .LBB13_1: // %loop
 ; CHECK-BE-NEXT:    // =>This Inner Loop Header: Depth=1
@@ -1306,47 +1304,47 @@ define void @zext_v16i4_to_v16i32_in_loop(ptr %src, ptr %dst) {
 ; CHECK-BE-NEXT:    ubfx x15, x9, #44, #4
 ; CHECK-BE-NEXT:    lsr w12, w9, #24
 ; CHECK-BE-NEXT:    lsr x13, x9, #40
-; CHECK-BE-NEXT:    fmov s1, w11
+; CHECK-BE-NEXT:    fmov s0, w11
 ; CHECK-BE-NEXT:    lsr x11, x9, #60
-; CHECK-BE-NEXT:    fmov s2, w14
-; CHECK-BE-NEXT:    fmov s3, w15
-; CHECK-BE-NEXT:    fmov s4, w11
+; CHECK-BE-NEXT:    fmov s1, w14
+; CHECK-BE-NEXT:    fmov s2, w15
+; CHECK-BE-NEXT:    fmov s3, w11
 ; CHECK-BE-NEXT:    lsr w11, w9, #20
-; CHECK-BE-NEXT:    mov v1.h[1], w10
+; CHECK-BE-NEXT:    mov v0.h[1], w10
 ; CHECK-BE-NEXT:    lsr x10, x9, #56
-; CHECK-BE-NEXT:    mov v2.h[1], w12
+; CHECK-BE-NEXT:    mov v1.h[1], w12
 ; CHECK-BE-NEXT:    lsr w12, w9, #4
-; CHECK-BE-NEXT:    mov v3.h[1], w13
-; CHECK-BE-NEXT:    mov v4.h[1], w10
+; CHECK-BE-NEXT:    mov v2.h[1], w13
+; CHECK-BE-NEXT:    mov v3.h[1], w10
 ; CHECK-BE-NEXT:    lsr x10, x9, #36
-; CHECK-BE-NEXT:    mov v1.h[2], w12
+; CHECK-BE-NEXT:    mov v0.h[2], w12
 ; CHECK-BE-NEXT:    lsr x12, x9, #52
-; CHECK-BE-NEXT:    mov v2.h[2], w11
-; CHECK-BE-NEXT:    mov v3.h[2], w10
+; CHECK-BE-NEXT:    mov v1.h[2], w11
+; CHECK-BE-NEXT:    mov v2.h[2], w10
 ; CHECK-BE-NEXT:    lsr w10, w9, #16
 ; CHECK-BE-NEXT:    lsr x11, x9, #32
-; CHECK-BE-NEXT:    mov v4.h[2], w12
-; CHECK-BE-NEXT:    mov v1.h[3], w9
+; CHECK-BE-NEXT:    mov v3.h[2], w12
+; CHECK-BE-NEXT:    mov v0.h[3], w9
 ; CHECK-BE-NEXT:    lsr x9, x9, #48
-; CHECK-BE-NEXT:    mov v2.h[3], w10
-; CHECK-BE-NEXT:    mov v3.h[3], w11
+; CHECK-BE-NEXT:    mov v1.h[3], w10
+; CHECK-BE-NEXT:    mov v2.h[3], w11
 ; CHECK-BE-NEXT:    add x10, x1, #32
-; CHECK-BE-NEXT:    mov v4.h[3], w9
+; CHECK-BE-NEXT:    mov v3.h[3], w9
 ; CHECK-BE-NEXT:    add x9, x1, #48
+; CHECK-BE-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-BE-NEXT:    ushll v1.4s, v1.4h, #0
 ; CHECK-BE-NEXT:    ushll v2.4s, v2.4h, #0
 ; CHECK-BE-NEXT:    ushll v3.4s, v3.4h, #0
-; CHECK-BE-NEXT:    ushll v4.4s, v4.4h, #0
-; CHECK-BE-NEXT:    and v1.16b, v1.16b, v0.16b
-; CHECK-BE-NEXT:    and v2.16b, v2.16b, v0.16b
-; CHECK-BE-NEXT:    and v3.16b, v3.16b, v0.16b
-; CHECK-BE-NEXT:    and v4.16b, v4.16b, v0.16b
-; CHECK-BE-NEXT:    st1 { v1.4s }, [x9]
+; CHECK-BE-NEXT:    and z0.s, z0.s, #0xf
+; CHECK-BE-NEXT:    and z1.s, z1.s, #0xf
+; CHECK-BE-NEXT:    and z2.s, z2.s, #0xf
+; CHECK-BE-NEXT:    and z3.s, z3.s, #0xf
+; CHECK-BE-NEXT:    st1 { v0.4s }, [x9]
 ; CHECK-BE-NEXT:    add x9, x1, #16
-; CHECK-BE-NEXT:    st1 { v2.4s }, [x10]
-; CHECK-BE-NEXT:    st1 { v4.4s }, [x1]
+; CHECK-BE-NEXT:    st1 { v1.4s }, [x10]
+; CHECK-BE-NEXT:    st1 { v3.4s }, [x1]
 ; CHECK-BE-NEXT:    add x1, x1, #64
-; CHECK-BE-NEXT:    st1 { v3.4s }, [x9]
+; CHECK-BE-NEXT:    st1 { v2.4s }, [x9]
 ; CHECK-BE-NEXT:    b.ne .LBB13_1
 ; CHECK-BE-NEXT:  // %bb.2: // %exit
 ; CHECK-BE-NEXT:    ret


        

