[llvm] 7b338a6 - [CodeGen][AArch64] Generate Pseudo instructions for integer MLA/MAD/MLS/MSB

sgokhale via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 9 21:29:29 PST 2023


Author: sgokhale
Date: 2023-03-10T10:58:18+05:30
New Revision: 7b338a691ec916cde3fd293123e32b490d260922

URL: https://github.com/llvm/llvm-project/commit/7b338a691ec916cde3fd293123e32b490d260922
DIFF: https://github.com/llvm/llvm-project/commit/7b338a691ec916cde3fd293123e32b490d260922.diff

LOG: [CodeGen][AArch64] Generate Pseudo instructions for integer MLA/MAD/MLS/MSB

Differential Revision: https://reviews.llvm.org/D145488
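
This teaches ISel to select new FalseLanesUndef pseudos (MLA_ZPZZZ, MLS_ZPZZZ) for predicated
integer multiply-accumulate and links MLA/MLS with their reversed forms MAD/MSB, so the SVE
pseudo expansion can pick whichever destructive encoding matches the register allocation
instead of inserting an extra mov. A minimal IR sketch (taken from the sve-int-arith.ll changes
below) that previously compiled to an mla into z2 plus a mov back to z0, and now selects a
single mad:

define <vscale x 16 x i8> @mad_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
  %prod = mul <vscale x 16 x i8> %a, %b    ; multiplicands arrive in z0 and z1
  %res = add <vscale x 16 x i8> %c, %prod  ; accumulator arrives in z2
  ret <vscale x 16 x i8> %res              ; -> mad z0.b, p0/m, z1.b, z2.b
}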

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/lib/Target/AArch64/SVEInstrFormats.td
    llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
    llvm/test/CodeGen/AArch64/sve-int-arith.ll
    llvm/test/CodeGen/AArch64/sve-pseudos-expand-undef.mir
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index f87b9e38fbaae..278f74ef341f8 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -401,14 +401,17 @@ def AArch64sub_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2),
                               (sub node:$op1, (vselect node:$pred, node:$op2, (SVEDup0)))]>;
 def AArch64mla_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
                              [(int_aarch64_sve_mla node:$pred, node:$op1, node:$op2, node:$op3),
-                              (add node:$op1, (AArch64mul_p_oneuse node:$pred, node:$op2, node:$op3)),
                               // add(a, select(mask, mul(b, c), splat(0))) -> mla(a, mask, b, c)
                               (add node:$op1, (vselect node:$pred, (AArch64mul_p_oneuse (SVEAllActive), node:$op2, node:$op3), (SVEDup0)))]>;
+// Pattern for generating the pseudo for MLA_ZPmZZ/MAD_ZPmZZ.
+def AArch64mla_p : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
+                            [(add node:$op1, (AArch64mul_p_oneuse node:$pred, node:$op2, node:$op3))]>;
 def AArch64mls_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
                              [(int_aarch64_sve_mls node:$pred, node:$op1, node:$op2, node:$op3),
-                              (sub node:$op1, (AArch64mul_p_oneuse node:$pred, node:$op2, node:$op3)),
                               // sub(a, select(mask, mul(b, c), splat(0))) -> mls(a, mask, b, c)
                               (sub node:$op1, (vselect node:$pred, (AArch64mul_p_oneuse (SVEAllActive), node:$op2, node:$op3), (SVEDup0)))]>;
+def AArch64mls_p : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
+                            [(sub node:$op1, (AArch64mul_p_oneuse node:$pred, node:$op2, node:$op3))]>;
 def AArch64eor3 : PatFrags<(ops node:$op1, node:$op2, node:$op3),
                            [(int_aarch64_sve_eor3 node:$op1, node:$op2, node:$op3),
                             (xor node:$op1, (xor node:$op2, node:$op3))]>;
@@ -483,10 +486,13 @@ let Predicates = [HasSVEorSME] in {
   defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", ssubsat>;
   defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", usubsat>;
 
-  defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", int_aarch64_sve_mad>;
-  defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", int_aarch64_sve_msb>;
-  defm MLA_ZPmZZ : sve_int_mlas_vvv_pred<0b0, "mla", AArch64mla_m1>;
-  defm MLS_ZPmZZ : sve_int_mlas_vvv_pred<0b1, "mls", AArch64mls_m1>;
+  defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", int_aarch64_sve_mad, "MLA_ZPmZZ", /*isReverseInstr*/ 1>;
+  defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", int_aarch64_sve_msb, "MLS_ZPmZZ", /*isReverseInstr*/ 1>;
+  defm MLA_ZPmZZ : sve_int_mlas_vvv_pred<0b0, "mla", AArch64mla_m1, "MLA_ZPZZZ", "MAD_ZPmZZ">;
+  defm MLS_ZPmZZ : sve_int_mlas_vvv_pred<0b1, "mls", AArch64mls_m1, "MLS_ZPZZZ", "MSB_ZPmZZ">;
+
+  defm MLA_ZPZZZ : sve_int_3op_p_mladdsub<AArch64mla_p>;
+  defm MLS_ZPZZZ : sve_int_3op_p_mladdsub<AArch64mls_p>;
 
   // SVE predicated integer reductions.
   defm SADDV_VPZ : sve_int_reduce_0_saddv<0b000, "saddv", AArch64saddv_p>;

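The PatFrags split above is the core of the change: AArch64mla_m1/AArch64mls_m1 keep the
merging patterns (the intrinsics and the masked-select forms, whose inactive lanes must
preserve the accumulator), while the new AArch64mla_p/AArch64mls_p fragments match a plain
add/sub of a one-use predicated multiply, where the inactive lanes of the result are not
observed, so the FalseLanesUndef pseudos below are legal. A sketch of the subtract case
(mirroring msb_i64 in sve-int-arith.ll):

define <vscale x 2 x i64> @msb_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
  %prod = mul <vscale x 2 x i64> %a, %b
  %res = sub <vscale x 2 x i64> %c, %prod  ; -> msb z0.d, p0/m, z1.d, z2.d
  ret <vscale x 2 x i64> %res
}
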
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 9c2913122f28b..71995fc26fb48 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -3123,11 +3123,16 @@ class sve_int_mladdsub_vvv_pred<bits<2> sz8_64, bits<1> opc, string asm,
   let hasSideEffects = 0;
 }
 
-multiclass sve_int_mladdsub_vvv_pred<bits<1> opc, string asm, SDPatternOperator op> {
-  def _B : sve_int_mladdsub_vvv_pred<0b00, opc, asm, ZPR8>;
-  def _H : sve_int_mladdsub_vvv_pred<0b01, opc, asm, ZPR16>;
-  def _S : sve_int_mladdsub_vvv_pred<0b10, opc, asm, ZPR32>;
-  def _D : sve_int_mladdsub_vvv_pred<0b11, opc, asm, ZPR64>;
+multiclass sve_int_mladdsub_vvv_pred<bits<1> opc, string asm, SDPatternOperator op,
+                                     string revname, bit isReverseInstr=0> {
+  def _B : sve_int_mladdsub_vvv_pred<0b00, opc, asm, ZPR8>,
+           SVEInstr2Rev<NAME # _B, revname # _B, isReverseInstr>;
+  def _H : sve_int_mladdsub_vvv_pred<0b01, opc, asm, ZPR16>,
+           SVEInstr2Rev<NAME # _H, revname # _H, isReverseInstr>;
+  def _S : sve_int_mladdsub_vvv_pred<0b10, opc, asm, ZPR32>,
+           SVEInstr2Rev<NAME # _S, revname # _S, isReverseInstr>;
+  def _D : sve_int_mladdsub_vvv_pred<0b11, opc, asm, ZPR64>,
+           SVEInstr2Rev<NAME # _D, revname # _D, isReverseInstr>;
 
   def : SVE_4_Op_Pat<nxv16i8, op, nxv16i1, nxv16i8, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
   def : SVE_4_Op_Pat<nxv8i16, op, nxv8i1, nxv8i16, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
@@ -3156,16 +3161,21 @@ class sve_int_mlas_vvv_pred<bits<2> sz8_64, bits<1> opc, string asm,
   let Inst{4-0}   = Zda;
 
   let Constraints = "$Zda = $_Zda";
-  let DestructiveInstType = DestructiveOther;
+  let DestructiveInstType = DestructiveTernaryCommWithRev;
   let ElementSize = zprty.ElementSize;
   let hasSideEffects = 0;
 }
 
-multiclass sve_int_mlas_vvv_pred<bits<1> opc, string asm, SDPatternOperator op> {
-  def _B : sve_int_mlas_vvv_pred<0b00, opc, asm, ZPR8>;
-  def _H : sve_int_mlas_vvv_pred<0b01, opc, asm, ZPR16>;
-  def _S : sve_int_mlas_vvv_pred<0b10, opc, asm, ZPR32>;
-  def _D : sve_int_mlas_vvv_pred<0b11, opc, asm, ZPR64>;
+multiclass sve_int_mlas_vvv_pred<bits<1> opc, string asm, SDPatternOperator op,
+                                 string Ps, string revname, bit isReverseInstr=0> {
+  def _B : sve_int_mlas_vvv_pred<0b00, opc, asm, ZPR8>,
+           SVEPseudo2Instr<Ps # _B, 1>, SVEInstr2Rev<NAME # _B, revname # _B, isReverseInstr>;
+  def _H : sve_int_mlas_vvv_pred<0b01, opc, asm, ZPR16>,
+           SVEPseudo2Instr<Ps # _H, 1>, SVEInstr2Rev<NAME # _H, revname # _H, isReverseInstr>;
+  def _S : sve_int_mlas_vvv_pred<0b10, opc, asm, ZPR32>,
+           SVEPseudo2Instr<Ps # _S, 1>, SVEInstr2Rev<NAME # _S, revname # _S, isReverseInstr>;
+  def _D : sve_int_mlas_vvv_pred<0b11, opc, asm, ZPR64>,
+           SVEPseudo2Instr<Ps # _D, 1>, SVEInstr2Rev<NAME # _D, revname # _D, isReverseInstr>;
 
   def : SVE_4_Op_Pat<nxv16i8, op, nxv16i1, nxv16i8, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
   def : SVE_4_Op_Pat<nxv8i16, op, nxv8i1, nxv8i16, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
@@ -3173,6 +3183,19 @@ multiclass sve_int_mlas_vvv_pred<bits<1> opc, string asm, SDPatternOperator op>
   def : SVE_4_Op_Pat<nxv2i64, op, nxv2i1, nxv2i64, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
+// Multiclass for generating pseudos for SVE MLA/MAD/MLS/MSB.
+multiclass sve_int_3op_p_mladdsub<SDPatternOperator op> {
+  def _UNDEF_B : PredThreeOpPseudo<NAME # _B, ZPR8,  FalseLanesUndef>;
+  def _UNDEF_H : PredThreeOpPseudo<NAME # _H, ZPR16, FalseLanesUndef>;
+  def _UNDEF_S : PredThreeOpPseudo<NAME # _S, ZPR32, FalseLanesUndef>;
+  def _UNDEF_D : PredThreeOpPseudo<NAME # _D, ZPR64, FalseLanesUndef>;
+
+  def : SVE_4_Op_Pat<nxv16i8, op, nxv16i1, nxv16i8, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _UNDEF_B)>;
+  def : SVE_4_Op_Pat<nxv8i16, op, nxv8i1,  nxv8i16, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _UNDEF_H)>;
+  def : SVE_4_Op_Pat<nxv4i32, op, nxv4i1,  nxv4i32, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _UNDEF_S)>;
+  def : SVE_4_Op_Pat<nxv2i64, op, nxv2i1,  nxv2i64, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _UNDEF_D)>;
+}
+
 //===----------------------------------------------------------------------===//
 // SVE2 Integer Multiply-Add - Unpredicated Group
 //===----------------------------------------------------------------------===//

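The multiclass changes wire the pieces together: SVEPseudo2Instr maps each MLA_ZPZZZ/MLS_ZPZZZ
pseudo back to its real MLA/MLS instruction, SVEInstr2Rev records MAD/MSB as the reverse forms,
and DestructiveTernaryCommWithRev lets the expand step commute the multiplicands or switch to
the reverse instruction depending on which source register the destination landed on; the
sve-pseudos-expand-undef.mir test below exercises this, expanding an MLS_ZPZZZ_UNDEF_B whose
destination matches a multiplicand into MSB_ZPmZZ_B. A sketch with a materialized constant
accumulator (assumed IR, mirroring muladd_i32_positiveAddend in sve-int-arith.ll):

define <vscale x 4 x i32> @muladd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
  %ins = insertelement <vscale x 4 x i32> undef, i32 65536, i32 0
  %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %prod = mul <vscale x 4 x i32> %a, %b
  %res = add <vscale x 4 x i32> %prod, %splat
  ret <vscale x 4 x i32> %res  ; -> mov z2.s, #0x10000 ; mad z0.s, p0/m, z1.s, z2.s
}
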
diff --git a/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll b/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
index b32dad71bbcb5..f96e411885df0 100644
--- a/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
@@ -72,19 +72,18 @@ define void @scatter_f16_index_offset_var(ptr %base, i64 %offset, i64 %scale, <v
 ; CHECK-LABEL: scatter_f16_index_offset_var:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.d, #0, #1
-; CHECK-NEXT:    mov z3.d, x1
-; CHECK-NEXT:    mov z2.d, z1.d
-; CHECK-NEXT:    mov z4.d, z3.d
 ; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    mov z2.d, z1.d
+; CHECK-NEXT:    mov z3.d, x1
 ; CHECK-NEXT:    incd z2.d
-; CHECK-NEXT:    mla z3.d, p1/m, z1.d, z3.d
-; CHECK-NEXT:    mla z4.d, p1/m, z2.d, z4.d
+; CHECK-NEXT:    mad z1.d, p1/m, z3.d, z3.d
+; CHECK-NEXT:    mad z2.d, p1/m, z3.d, z3.d
 ; CHECK-NEXT:    punpklo p1.h, p0.b
-; CHECK-NEXT:    uunpklo z1.d, z0.s
+; CHECK-NEXT:    uunpklo z3.d, z0.s
 ; CHECK-NEXT:    punpkhi p0.h, p0.b
 ; CHECK-NEXT:    uunpkhi z0.d, z0.s
-; CHECK-NEXT:    st1h { z1.d }, p1, [x0, z3.d, lsl #1]
-; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z4.d, lsl #1]
+; CHECK-NEXT:    st1h { z3.d }, p1, [x0, z1.d, lsl #1]
+; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z2.d, lsl #1]
 ; CHECK-NEXT:    ret
   %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/AArch64/sve-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-int-arith.ll
index 98e7ecddbed8e..c3be583cbb0ae 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-arith.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64 -mattr=+sve < %s | FileCheck %s
 
 define <vscale x 2 x i64> @add_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: add_i64:
@@ -337,14 +337,11 @@ define <vscale x 16 x i8> @uqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b
   ret <vscale x 16 x i8> %res
 }
 
-; Next four cases should generate mad instruction once pseudo instructions are emitted for MLA/MAD
-
 define <vscale x 16 x i8> @mad_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
 ; CHECK-LABEL: mad_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    mla z2.b, p0/m, z0.b, z1.b
-; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    mad z0.b, p0/m, z1.b, z2.b
 ; CHECK-NEXT:    ret
   %prod = mul <vscale x 16 x i8> %a, %b
   %res = add <vscale x 16 x i8> %c, %prod
@@ -355,8 +352,7 @@ define <vscale x 8 x i16> @mad_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b,
 ; CHECK-LABEL: mad_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    mla z2.h, p0/m, z0.h, z1.h
-; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    mad z0.h, p0/m, z1.h, z2.h
 ; CHECK-NEXT:    ret
   %prod = mul <vscale x 8 x i16> %a, %b
   %res = add <vscale x 8 x i16> %c, %prod
@@ -367,8 +363,7 @@ define <vscale x 4 x i32> @mad_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b,
 ; CHECK-LABEL: mad_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mla z2.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    mad z0.s, p0/m, z1.s, z2.s
 ; CHECK-NEXT:    ret
   %prod = mul <vscale x 4 x i32> %a, %b
   %res = add <vscale x 4 x i32> %c, %prod
@@ -379,8 +374,7 @@ define <vscale x 2 x i64> @mad_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b,
 ; CHECK-LABEL: mad_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mla z2.d, p0/m, z0.d, z1.d
-; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    mad z0.d, p0/m, z1.d, z2.d
 ; CHECK-NEXT:    ret
   %prod = mul <vscale x 2 x i64> %a, %b
   %res = add <vscale x 2 x i64> %c, %prod
@@ -445,14 +439,11 @@ define <vscale x 16 x i8> @mla_i8_multiuse(<vscale x 16 x i8> %a, <vscale x 16 x
   ret <vscale x 16 x i8> %res
 }
 
-; Next four cases should generate msb instruction once psuedo instruction is emitted for MLS/MSB
-
 define <vscale x 16 x i8> @msb_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
 ; CHECK-LABEL: msb_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    mls z2.b, p0/m, z0.b, z1.b
-; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    msb z0.b, p0/m, z1.b, z2.b
 ; CHECK-NEXT:    ret
   %prod = mul <vscale x 16 x i8> %a, %b
   %res = sub <vscale x 16 x i8> %c, %prod
@@ -463,8 +454,7 @@ define <vscale x 8 x i16> @msb_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b,
 ; CHECK-LABEL: msb_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    mls z2.h, p0/m, z0.h, z1.h
-; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    msb z0.h, p0/m, z1.h, z2.h
 ; CHECK-NEXT:    ret
   %prod = mul <vscale x 8 x i16> %a, %b
   %res = sub <vscale x 8 x i16> %c, %prod
@@ -475,8 +465,7 @@ define <vscale x 4 x i32> @msb_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b,
 ; CHECK-LABEL: msb_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mls z2.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    msb z0.s, p0/m, z1.s, z2.s
 ; CHECK-NEXT:    ret
   %prod = mul <vscale x 4 x i32> %a, %b
   %res = sub <vscale x 4 x i32> %c, %prod
@@ -487,8 +476,7 @@ define <vscale x 2 x i64> @msb_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b,
 ; CHECK-LABEL: msb_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mls z2.d, p0/m, z0.d, z1.d
-; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    msb z0.d, p0/m, z1.d, z2.d
 ; CHECK-NEXT:    ret
   %prod = mul <vscale x 2 x i64> %a, %b
   %res = sub <vscale x 2 x i64> %c, %prod
@@ -546,8 +534,7 @@ define <vscale x 2 x i64> @mls_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b,
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z2.d, #0xffffffff
-; CHECK-NEXT:    mla z2.d, p0/m, z0.d, z1.d
-; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    mad z0.d, p0/m, z1.d, z2.d
 ; CHECK-NEXT:    ret
 {
   %1 = mul <vscale x 2 x i64> %a, %b
@@ -560,8 +547,7 @@ define <vscale x 2 x i64> @muladd_i64_negativeAddend(<vscale x 2 x i64> %a, <vsc
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z2.d, #0xffffffff00000001
-; CHECK-NEXT:    mla z2.d, p0/m, z0.d, z1.d
-; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    mad z0.d, p0/m, z1.d, z2.d
 ; CHECK-NEXT:    ret
 {
   %1 = mul <vscale x 2 x i64> %a, %b
@@ -575,8 +561,7 @@ define <vscale x 4 x i32> @muladd_i32_positiveAddend(<vscale x 4 x i32> %a, <vsc
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov z2.s, #0x10000
-; CHECK-NEXT:    mla z2.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    mad z0.s, p0/m, z1.s, z2.s
 ; CHECK-NEXT:    ret
 {
   %1 = mul <vscale x 4 x i32> %a, %b
@@ -589,8 +574,7 @@ define <vscale x 4 x i32> @muladd_i32_negativeAddend(<vscale x 4 x i32> %a, <vsc
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov z2.s, #0xffff0000
-; CHECK-NEXT:    mla z2.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    mad z0.s, p0/m, z1.s, z2.s
 ; CHECK-NEXT:    ret
 {
   %1 = mul <vscale x 4 x i32> %a, %b
@@ -616,8 +600,7 @@ define <vscale x 8 x i16> @muladd_i16_negativeAddend(<vscale x 8 x i16> %a, <vsc
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov z2.h, #-255 // =0xffffffffffffff01
-; CHECK-NEXT:    mla z2.h, p0/m, z0.h, z1.h
-; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    mad z0.h, p0/m, z1.h, z2.h
 ; CHECK-NEXT:    ret
 {
   %1 = mul <vscale x 8 x i16> %a, %b

diff --git a/llvm/test/CodeGen/AArch64/sve-pseudos-expand-undef.mir b/llvm/test/CodeGen/AArch64/sve-pseudos-expand-undef.mir
index c81363f6133b7..1524fdd0336cb 100644
--- a/llvm/test/CodeGen/AArch64/sve-pseudos-expand-undef.mir
+++ b/llvm/test/CodeGen/AArch64/sve-pseudos-expand-undef.mir
@@ -20,3 +20,23 @@ body:             |
     RET_ReallyLR
 
 ...
+
+# CHECK: {{.*}} MSB_ZPmZZ_B {{.*}}
+---
+name: expand_mls_to_msb
+body:             |
+  bb.0:
+    renamable $p0 = PTRUE_B 31
+    renamable $z0 = MLS_ZPZZZ_UNDEF_B killed renamable $p0, killed renamable $z2, killed renamable $z0, killed renamable $z1
+    RET_ReallyLR implicit $z0
+...
+
+# CHECK: {{.*}} MAD_ZPmZZ_B {{.*}}
+---
+name: expand_mla_to_mad
+body:             |
+  bb.0:
+    renamable $p0 = PTRUE_B 31
+    renamable $z0 = MLA_ZPZZZ_UNDEF_B killed renamable $p0, killed renamable $z2, killed renamable $z0, killed renamable $z1
+    RET_ReallyLR implicit $z0
+...

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
index 910e18dec1a94..31c0a24c166d3 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
@@ -109,10 +109,10 @@ define <16 x i8> @srem_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
 define void @srem_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: srem_v32i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ldp q2, q0, [x0]
 ; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ptrue p1.h, vl4
-; CHECK-NEXT:    ldp q3, q2, [x1]
+; CHECK-NEXT:    ldp q3, q1, [x1]
 ; CHECK-NEXT:    mov z5.d, z0.d
 ; CHECK-NEXT:    sunpklo z7.h, z0.b
 ; CHECK-NEXT:    ext z5.b, z5.b, z0.b, #8
@@ -120,9 +120,9 @@ define void @srem_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-NEXT:    sunpklo z18.s, z5.h
 ; CHECK-NEXT:    ext z5.b, z5.b, z5.b, #8
 ; CHECK-NEXT:    sunpklo z5.s, z5.h
-; CHECK-NEXT:    mov z4.d, z2.d
-; CHECK-NEXT:    sunpklo z6.h, z2.b
-; CHECK-NEXT:    ext z4.b, z4.b, z2.b, #8
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    sunpklo z6.h, z1.b
+; CHECK-NEXT:    ext z4.b, z4.b, z1.b, #8
 ; CHECK-NEXT:    sunpklo z16.s, z6.h
 ; CHECK-NEXT:    sunpklo z4.h, z4.b
 ; CHECK-NEXT:    ext z6.b, z6.b, z6.b, #8
@@ -139,9 +139,9 @@ define void @srem_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-NEXT:    splice z17.h, p1, z17.h, z4.h
 ; CHECK-NEXT:    sunpklo z4.s, z7.h
 ; CHECK-NEXT:    mov z6.d, z3.d
-; CHECK-NEXT:    mov z7.d, z1.d
+; CHECK-NEXT:    mov z7.d, z2.d
 ; CHECK-NEXT:    ext z6.b, z6.b, z3.b, #8
-; CHECK-NEXT:    ext z7.b, z7.b, z1.b, #8
+; CHECK-NEXT:    ext z7.b, z7.b, z2.b, #8
 ; CHECK-NEXT:    sdivr z16.s, p0/m, z16.s, z18.s
 ; CHECK-NEXT:    sunpklo z6.h, z6.b
 ; CHECK-NEXT:    sunpklo z7.h, z7.b
@@ -161,7 +161,7 @@ define void @srem_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-NEXT:    splice z5.h, p1, z5.h, z4.h
 ; CHECK-NEXT:    splice z7.h, p1, z7.h, z6.h
 ; CHECK-NEXT:    sunpklo z4.h, z3.b
-; CHECK-NEXT:    sunpklo z6.h, z1.b
+; CHECK-NEXT:    sunpklo z6.h, z2.b
 ; CHECK-NEXT:    sunpklo z16.s, z4.h
 ; CHECK-NEXT:    sunpklo z18.s, z6.h
 ; CHECK-NEXT:    ext z4.b, z4.b, z4.b, #8
@@ -181,9 +181,9 @@ define void @srem_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-NEXT:    ptrue p1.b, vl16
 ; CHECK-NEXT:    splice z7.b, p0, z7.b, z4.b
 ; CHECK-NEXT:    splice z5.b, p0, z5.b, z6.b
-; CHECK-NEXT:    mls z1.b, p1/m, z7.b, z3.b
-; CHECK-NEXT:    mls z0.b, p1/m, z5.b, z2.b
-; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    mls z2.b, p1/m, z7.b, z3.b
+; CHECK-NEXT:    mls z0.b, p1/m, z5.b, z1.b
+; CHECK-NEXT:    stp q2, q0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <32 x i8>, ptr %a
   %op2 = load <32 x i8>, ptr %b
@@ -492,10 +492,10 @@ define <16 x i8> @urem_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
 define void @urem_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: urem_v32i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ldp q2, q0, [x0]
 ; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ptrue p1.h, vl4
-; CHECK-NEXT:    ldp q3, q2, [x1]
+; CHECK-NEXT:    ldp q3, q1, [x1]
 ; CHECK-NEXT:    mov z5.d, z0.d
 ; CHECK-NEXT:    uunpklo z7.h, z0.b
 ; CHECK-NEXT:    ext z5.b, z5.b, z0.b, #8
@@ -503,9 +503,9 @@ define void @urem_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-NEXT:    uunpklo z18.s, z5.h
 ; CHECK-NEXT:    ext z5.b, z5.b, z5.b, #8
 ; CHECK-NEXT:    uunpklo z5.s, z5.h
-; CHECK-NEXT:    mov z4.d, z2.d
-; CHECK-NEXT:    uunpklo z6.h, z2.b
-; CHECK-NEXT:    ext z4.b, z4.b, z2.b, #8
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uunpklo z6.h, z1.b
+; CHECK-NEXT:    ext z4.b, z4.b, z1.b, #8
 ; CHECK-NEXT:    uunpklo z16.s, z6.h
 ; CHECK-NEXT:    uunpklo z4.h, z4.b
 ; CHECK-NEXT:    ext z6.b, z6.b, z6.b, #8
@@ -522,9 +522,9 @@ define void @urem_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-NEXT:    splice z17.h, p1, z17.h, z4.h
 ; CHECK-NEXT:    uunpklo z4.s, z7.h
 ; CHECK-NEXT:    mov z6.d, z3.d
-; CHECK-NEXT:    mov z7.d, z1.d
+; CHECK-NEXT:    mov z7.d, z2.d
 ; CHECK-NEXT:    ext z6.b, z6.b, z3.b, #8
-; CHECK-NEXT:    ext z7.b, z7.b, z1.b, #8
+; CHECK-NEXT:    ext z7.b, z7.b, z2.b, #8
 ; CHECK-NEXT:    udivr z16.s, p0/m, z16.s, z18.s
 ; CHECK-NEXT:    uunpklo z6.h, z6.b
 ; CHECK-NEXT:    uunpklo z7.h, z7.b
@@ -544,7 +544,7 @@ define void @urem_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-NEXT:    splice z5.h, p1, z5.h, z4.h
 ; CHECK-NEXT:    splice z7.h, p1, z7.h, z6.h
 ; CHECK-NEXT:    uunpklo z4.h, z3.b
-; CHECK-NEXT:    uunpklo z6.h, z1.b
+; CHECK-NEXT:    uunpklo z6.h, z2.b
 ; CHECK-NEXT:    uunpklo z16.s, z4.h
 ; CHECK-NEXT:    uunpklo z18.s, z6.h
 ; CHECK-NEXT:    ext z4.b, z4.b, z4.b, #8
@@ -564,9 +564,9 @@ define void @urem_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-NEXT:    ptrue p1.b, vl16
 ; CHECK-NEXT:    splice z7.b, p0, z7.b, z4.b
 ; CHECK-NEXT:    splice z5.b, p0, z5.b, z6.b
-; CHECK-NEXT:    mls z1.b, p1/m, z7.b, z3.b
-; CHECK-NEXT:    mls z0.b, p1/m, z5.b, z2.b
-; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    mls z2.b, p1/m, z7.b, z3.b
+; CHECK-NEXT:    mls z0.b, p1/m, z5.b, z1.b
+; CHECK-NEXT:    stp q2, q0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <32 x i8>, ptr %a
   %op2 = load <32 x i8>, ptr %b

