[llvm] 74445d6 - [SVE] Add vselect(mla/mls) patterns for cases where a multiplicand is used for the false lanes.

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 25 03:03:33 PDT 2023


Author: Paul Walker
Date: 2023-07-25T10:02:32Z
New Revision: 74445d652da99a1b622fccf18591811c016ceb4f

URL: https://github.com/llvm/llvm-project/commit/74445d652da99a1b622fccf18591811c016ceb4f
DIFF: https://github.com/llvm/llvm-project/commit/74445d652da99a1b622fccf18591811c016ceb4f.diff

LOG: [SVE] Add vselect(mla/mls) patterns for cases where a multiplicand is used for the false lanes.

Differential Revision: https://reviews.llvm.org/D155972
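
For context: the pre-existing AArch64mla_m1/AArch64mls_m1 fragments only match
selects whose false lanes come from the addend, i.e.
select(mask, add(a, mul(b, c)), a) -> mla(a, mask, b, c). This patch adds
AArch64mad_m1/AArch64msb_m1 fragments for the case where the false lanes come
from a multiplicand instead, which maps onto the multiplicand-destructive
MAD/MSB encodings. A minimal IR sketch of the add form the new fragments are
intended to match (function and value names here are illustrative, not taken
from the patch):

  define <vscale x 4 x i32> @mad_sketch(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b,
                                        <vscale x 4 x i32> %c, <vscale x 4 x i1> %mask) {
    ; %a is both a multiplicand and the false-lane value, so this should now
    ; select a predicated MAD rather than an MLA followed by a SEL.
    %mul = mul <vscale x 4 x i32> %a, %b
    %add = add <vscale x 4 x i32> %c, %mul
    %sel = select <vscale x 4 x i1> %mask, <vscale x 4 x i32> %add, <vscale x 4 x i32> %a
    ret <vscale x 4 x i32> %sel
  }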

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/test/CodeGen/AArch64/sve-pred-selectop2.ll
    llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 9faf067e8ccecc..ad404e8dab2ad4 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -401,19 +401,24 @@ def AArch64subr : PatFrag<(ops node:$op1, node:$op2),
                           (sub node:$op2, node:$op1)>;
 def AArch64mla_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
                              [(int_aarch64_sve_mla node:$pred, node:$op1, node:$op2, node:$op3),
-                              // select(mask, add(a, mul(b, c)), a) -> mla(a, mask, b, c)
                               (vselect node:$pred, (add node:$op1, (AArch64mul_p_oneuse (SVEAllActive), node:$op2, node:$op3)), node:$op1)]>;
-// pattern for generating pseudo for MLA_ZPmZZ/MAD_ZPmZZ
 def AArch64mla_p : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
                             [(int_aarch64_sve_mla_u node:$pred, node:$op1, node:$op2, node:$op3),
                              (add node:$op1, (AArch64mul_p_oneuse node:$pred, node:$op2, node:$op3))]>;
+def AArch64mad_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
+                             [(int_aarch64_sve_mad node:$pred, node:$op1, node:$op2, node:$op3),
+                              (vselect node:$pred, (add node:$op3, (AArch64mul_p_oneuse (SVEAllActive), node:$op1, node:$op2)), node:$op1),
+                              (vselect node:$pred, (add node:$op3, (AArch64mul_p_oneuse (SVEAllActive), node:$op2, node:$op1)), node:$op1)]>;
 def AArch64mls_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
                              [(int_aarch64_sve_mls node:$pred, node:$op1, node:$op2, node:$op3),
-                              // select(mask, sub(a, mul(b, c)), a) -> mls(a, mask, b, c)
                               (vselect node:$pred, (sub node:$op1, (AArch64mul_p_oneuse (SVEAllActive), node:$op2, node:$op3)), node:$op1)]>;
 def AArch64mls_p : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
                              [(int_aarch64_sve_mls_u node:$pred, node:$op1, node:$op2, node:$op3),
                               (sub node:$op1, (AArch64mul_p_oneuse node:$pred, node:$op2, node:$op3))]>;
+def AArch64msb_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
+                             [(int_aarch64_sve_msb node:$pred, node:$op1, node:$op2, node:$op3),
+                              (vselect node:$pred, (sub node:$op3, (AArch64mul_p_oneuse (SVEAllActive), node:$op1, node:$op2)), node:$op1),
+                              (vselect node:$pred, (sub node:$op3, (AArch64mul_p_oneuse (SVEAllActive), node:$op2, node:$op1)), node:$op1)]>;
 def AArch64eor3 : PatFrags<(ops node:$op1, node:$op2, node:$op3),
                            [(int_aarch64_sve_eor3 node:$op1, node:$op2, node:$op3),
                             (xor node:$op1, (xor node:$op2, node:$op3))]>;
@@ -497,8 +502,8 @@ let Predicates = [HasSVEorSME] in {
   defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", ssubsat>;
   defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", usubsat>;
 
-  defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", int_aarch64_sve_mad, "MLA_ZPmZZ", /*isReverseInstr*/ 1>;
-  defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", int_aarch64_sve_msb, "MLS_ZPmZZ", /*isReverseInstr*/ 1>;
+  defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", AArch64mad_m1, "MLA_ZPmZZ", /*isReverseInstr*/ 1>;
+  defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", AArch64msb_m1, "MLS_ZPmZZ", /*isReverseInstr*/ 1>;
   defm MLA_ZPmZZ : sve_int_mlas_vvv_pred<0b0, "mla", AArch64mla_m1, "MLA_ZPZZZ", "MAD_ZPmZZ">;
   defm MLS_ZPmZZ : sve_int_mlas_vvv_pred<0b1, "mls", AArch64mls_m1, "MLS_ZPZZZ", "MSB_ZPmZZ">;
 

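For the subtract form, an analogous sketch of what the new AArch64msb_m1
fragment is intended to match (names again illustrative, not from the patch):

  define <vscale x 2 x i64> @msb_sketch(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b,
                                        <vscale x 2 x i64> %c, <vscale x 2 x i1> %mask) {
    ; select(mask, sub(c, mul(a, b)), a): the false lanes reuse multiplicand %a,
    ; matching the multiplicand-destructive MSB form.
    %mul = mul <vscale x 2 x i64> %a, %b
    %sub = sub <vscale x 2 x i64> %c, %mul
    %sel = select <vscale x 2 x i1> %mask, <vscale x 2 x i64> %sub, <vscale x 2 x i64> %a
    ret <vscale x 2 x i64> %sel
  }
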
diff  --git a/llvm/test/CodeGen/AArch64/sve-pred-selectop2.ll b/llvm/test/CodeGen/AArch64/sve-pred-selectop2.ll
index aa6681b6e0ff9c..64788d349cf36f 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-selectop2.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-selectop2.ll
@@ -935,9 +935,8 @@ define <vscale x 2 x i64> @mls_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i6
 ; CHECK-LABEL: mls_nxv2i64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    cmpgt p1.d, p0/z, z3.d, #0
-; CHECK-NEXT:    msb z1.d, p0/m, z0.d, z2.d
-; CHECK-NEXT:    mov z0.d, p1/m, z1.d
+; CHECK-NEXT:    cmpgt p0.d, p0/z, z3.d, #0
+; CHECK-NEXT:    msb z0.d, p0/m, z1.d, z2.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
@@ -951,9 +950,8 @@ define <vscale x 4 x i32> @mls_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i3
 ; CHECK-LABEL: mls_nxv4i32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    cmpgt p1.s, p0/z, z3.s, #0
-; CHECK-NEXT:    msb z1.s, p0/m, z0.s, z2.s
-; CHECK-NEXT:    mov z0.s, p1/m, z1.s
+; CHECK-NEXT:    cmpgt p0.s, p0/z, z3.s, #0
+; CHECK-NEXT:    msb z0.s, p0/m, z1.s, z2.s
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
@@ -967,9 +965,8 @@ define <vscale x 8 x i16> @mls_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i1
 ; CHECK-LABEL: mls_nxv8i16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    cmpgt p1.h, p0/z, z3.h, #0
-; CHECK-NEXT:    msb z1.h, p0/m, z0.h, z2.h
-; CHECK-NEXT:    mov z0.h, p1/m, z1.h
+; CHECK-NEXT:    cmpgt p0.h, p0/z, z3.h, #0
+; CHECK-NEXT:    msb z0.h, p0/m, z1.h, z2.h
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
@@ -983,9 +980,8 @@ define <vscale x 16 x i8> @mls_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i
 ; CHECK-LABEL: mls_nxv16i8_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    cmpgt p1.b, p0/z, z3.b, #0
-; CHECK-NEXT:    msb z1.b, p0/m, z0.b, z2.b
-; CHECK-NEXT:    mov z0.b, p1/m, z1.b
+; CHECK-NEXT:    cmpgt p0.b, p0/z, z3.b, #0
+; CHECK-NEXT:    msb z0.b, p0/m, z1.b, z2.b
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
@@ -1812,8 +1808,8 @@ define <vscale x 2 x i64> @srem_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i
 ; CHECK-NEXT:    cmpgt p1.d, p0/z, z2.d, #0
 ; CHECK-NEXT:    movprfx z2, z0
 ; CHECK-NEXT:    sdiv z2.d, p0/m, z2.d, z1.d
-; CHECK-NEXT:    mls z0.d, p0/m, z2.d, z1.d
-; CHECK-NEXT:    sel z0.d, p1, z0.d, z1.d
+; CHECK-NEXT:    msb z1.d, p1/m, z2.d, z0.d
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
@@ -1829,8 +1825,8 @@ define <vscale x 4 x i32> @srem_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i
 ; CHECK-NEXT:    cmpgt p1.s, p0/z, z2.s, #0
 ; CHECK-NEXT:    movprfx z2, z0
 ; CHECK-NEXT:    sdiv z2.s, p0/m, z2.s, z1.s
-; CHECK-NEXT:    mls z0.s, p0/m, z2.s, z1.s
-; CHECK-NEXT:    sel z0.s, p1, z0.s, z1.s
+; CHECK-NEXT:    msb z1.s, p1/m, z2.s, z0.s
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
@@ -1842,19 +1838,19 @@ entry:
 define <vscale x 8 x i16> @srem_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
 ; CHECK-LABEL: srem_nxv8i16_y:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p1.s
 ; CHECK-NEXT:    sunpkhi z3.s, z1.h
 ; CHECK-NEXT:    sunpkhi z4.s, z0.h
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    sdivr z3.s, p1/m, z3.s, z4.s
 ; CHECK-NEXT:    sunpklo z5.s, z1.h
-; CHECK-NEXT:    sdivr z3.s, p0/m, z3.s, z4.s
 ; CHECK-NEXT:    sunpklo z6.s, z0.h
 ; CHECK-NEXT:    movprfx z4, z6
-; CHECK-NEXT:    sdiv z4.s, p0/m, z4.s, z5.s
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    uzp1 z3.h, z4.h, z3.h
-; CHECK-NEXT:    cmpgt p1.h, p0/z, z2.h, #0
-; CHECK-NEXT:    mls z0.h, p0/m, z3.h, z1.h
-; CHECK-NEXT:    sel z0.h, p1, z0.h, z1.h
+; CHECK-NEXT:    sdiv z4.s, p1/m, z4.s, z5.s
+; CHECK-NEXT:    cmpgt p0.h, p0/z, z2.h, #0
+; CHECK-NEXT:    uzp1 z2.h, z4.h, z3.h
+; CHECK-NEXT:    msb z1.h, p0/m, z2.h, z0.h
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
@@ -1871,25 +1867,26 @@ define <vscale x 16 x i8> @srem_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    sunpkhi z5.s, z3.h
 ; CHECK-NEXT:    sunpkhi z6.s, z4.h
-; CHECK-NEXT:    sunpklo z3.s, z3.h
-; CHECK-NEXT:    sunpklo z4.s, z4.h
+; CHECK-NEXT:    sunpklo z7.h, z1.b
 ; CHECK-NEXT:    sdivr z5.s, p0/m, z5.s, z6.s
-; CHECK-NEXT:    sdivr z3.s, p0/m, z3.s, z4.s
-; CHECK-NEXT:    sunpklo z4.h, z1.b
 ; CHECK-NEXT:    sunpklo z6.h, z0.b
-; CHECK-NEXT:    sunpkhi z7.s, z4.h
-; CHECK-NEXT:    sunpkhi z24.s, z6.h
+; CHECK-NEXT:    sunpklo z3.s, z3.h
 ; CHECK-NEXT:    sunpklo z4.s, z4.h
+; CHECK-NEXT:    sunpkhi z24.s, z7.h
+; CHECK-NEXT:    sunpkhi z25.s, z6.h
+; CHECK-NEXT:    sunpklo z7.s, z7.h
 ; CHECK-NEXT:    sunpklo z6.s, z6.h
-; CHECK-NEXT:    sdivr z7.s, p0/m, z7.s, z24.s
-; CHECK-NEXT:    sdivr z4.s, p0/m, z4.s, z6.s
-; CHECK-NEXT:    uzp1 z3.h, z3.h, z5.h
-; CHECK-NEXT:    uzp1 z4.h, z4.h, z7.h
+; CHECK-NEXT:    sdivr z3.s, p0/m, z3.s, z4.s
+; CHECK-NEXT:    movprfx z4, z25
+; CHECK-NEXT:    sdiv z4.s, p0/m, z4.s, z24.s
+; CHECK-NEXT:    sdiv z6.s, p0/m, z6.s, z7.s
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    uzp1 z3.b, z4.b, z3.b
-; CHECK-NEXT:    cmpgt p1.b, p0/z, z2.b, #0
-; CHECK-NEXT:    mls z0.b, p0/m, z3.b, z1.b
-; CHECK-NEXT:    sel z0.b, p1, z0.b, z1.b
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z5.h
+; CHECK-NEXT:    uzp1 z4.h, z6.h, z4.h
+; CHECK-NEXT:    cmpgt p0.b, p0/z, z2.b, #0
+; CHECK-NEXT:    uzp1 z2.b, z4.b, z3.b
+; CHECK-NEXT:    msb z1.b, p0/m, z2.b, z0.b
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
@@ -1905,8 +1902,8 @@ define <vscale x 2 x i64> @urem_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i
 ; CHECK-NEXT:    cmpgt p1.d, p0/z, z2.d, #0
 ; CHECK-NEXT:    movprfx z2, z0
 ; CHECK-NEXT:    udiv z2.d, p0/m, z2.d, z1.d
-; CHECK-NEXT:    mls z0.d, p0/m, z2.d, z1.d
-; CHECK-NEXT:    sel z0.d, p1, z0.d, z1.d
+; CHECK-NEXT:    msb z1.d, p1/m, z2.d, z0.d
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
@@ -1922,8 +1919,8 @@ define <vscale x 4 x i32> @urem_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i
 ; CHECK-NEXT:    cmpgt p1.s, p0/z, z2.s, #0
 ; CHECK-NEXT:    movprfx z2, z0
 ; CHECK-NEXT:    udiv z2.s, p0/m, z2.s, z1.s
-; CHECK-NEXT:    mls z0.s, p0/m, z2.s, z1.s
-; CHECK-NEXT:    sel z0.s, p1, z0.s, z1.s
+; CHECK-NEXT:    msb z1.s, p1/m, z2.s, z0.s
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
@@ -1935,19 +1932,19 @@ entry:
 define <vscale x 8 x i16> @urem_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
 ; CHECK-LABEL: urem_nxv8i16_y:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p1.s
 ; CHECK-NEXT:    uunpkhi z3.s, z1.h
 ; CHECK-NEXT:    uunpkhi z4.s, z0.h
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    udivr z3.s, p1/m, z3.s, z4.s
 ; CHECK-NEXT:    uunpklo z5.s, z1.h
-; CHECK-NEXT:    udivr z3.s, p0/m, z3.s, z4.s
 ; CHECK-NEXT:    uunpklo z6.s, z0.h
 ; CHECK-NEXT:    movprfx z4, z6
-; CHECK-NEXT:    udiv z4.s, p0/m, z4.s, z5.s
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    uzp1 z3.h, z4.h, z3.h
-; CHECK-NEXT:    cmpgt p1.h, p0/z, z2.h, #0
-; CHECK-NEXT:    mls z0.h, p0/m, z3.h, z1.h
-; CHECK-NEXT:    sel z0.h, p1, z0.h, z1.h
+; CHECK-NEXT:    udiv z4.s, p1/m, z4.s, z5.s
+; CHECK-NEXT:    cmpgt p0.h, p0/z, z2.h, #0
+; CHECK-NEXT:    uzp1 z2.h, z4.h, z3.h
+; CHECK-NEXT:    msb z1.h, p0/m, z2.h, z0.h
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
@@ -1964,25 +1961,26 @@ define <vscale x 16 x i8> @urem_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    uunpkhi z5.s, z3.h
 ; CHECK-NEXT:    uunpkhi z6.s, z4.h
-; CHECK-NEXT:    uunpklo z3.s, z3.h
-; CHECK-NEXT:    uunpklo z4.s, z4.h
+; CHECK-NEXT:    uunpklo z7.h, z1.b
 ; CHECK-NEXT:    udivr z5.s, p0/m, z5.s, z6.s
-; CHECK-NEXT:    udivr z3.s, p0/m, z3.s, z4.s
-; CHECK-NEXT:    uunpklo z4.h, z1.b
 ; CHECK-NEXT:    uunpklo z6.h, z0.b
-; CHECK-NEXT:    uunpkhi z7.s, z4.h
-; CHECK-NEXT:    uunpkhi z24.s, z6.h
+; CHECK-NEXT:    uunpklo z3.s, z3.h
 ; CHECK-NEXT:    uunpklo z4.s, z4.h
+; CHECK-NEXT:    uunpkhi z24.s, z7.h
+; CHECK-NEXT:    uunpkhi z25.s, z6.h
+; CHECK-NEXT:    uunpklo z7.s, z7.h
 ; CHECK-NEXT:    uunpklo z6.s, z6.h
-; CHECK-NEXT:    udivr z7.s, p0/m, z7.s, z24.s
-; CHECK-NEXT:    udivr z4.s, p0/m, z4.s, z6.s
-; CHECK-NEXT:    uzp1 z3.h, z3.h, z5.h
-; CHECK-NEXT:    uzp1 z4.h, z4.h, z7.h
+; CHECK-NEXT:    udivr z3.s, p0/m, z3.s, z4.s
+; CHECK-NEXT:    movprfx z4, z25
+; CHECK-NEXT:    udiv z4.s, p0/m, z4.s, z24.s
+; CHECK-NEXT:    udiv z6.s, p0/m, z6.s, z7.s
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    uzp1 z3.b, z4.b, z3.b
-; CHECK-NEXT:    cmpgt p1.b, p0/z, z2.b, #0
-; CHECK-NEXT:    mls z0.b, p0/m, z3.b, z1.b
-; CHECK-NEXT:    sel z0.b, p1, z0.b, z1.b
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z5.h
+; CHECK-NEXT:    uzp1 z4.h, z6.h, z4.h
+; CHECK-NEXT:    cmpgt p0.b, p0/z, z2.b, #0
+; CHECK-NEXT:    uzp1 z2.b, z4.b, z3.b
+; CHECK-NEXT:    msb z1.b, p0/m, z2.b, z0.b
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
@@ -2355,9 +2353,9 @@ define <vscale x 2 x i64> @mla_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i6
 ; CHECK-LABEL: mla_nxv2i64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    cmpgt p1.d, p0/z, z3.d, #0
-; CHECK-NEXT:    mla z0.d, p0/m, z1.d, z2.d
-; CHECK-NEXT:    sel z0.d, p1, z0.d, z1.d
+; CHECK-NEXT:    cmpgt p0.d, p0/z, z3.d, #0
+; CHECK-NEXT:    mad z1.d, p0/m, z2.d, z0.d
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
@@ -2371,9 +2369,9 @@ define <vscale x 4 x i32> @mla_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i3
 ; CHECK-LABEL: mla_nxv4i32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    cmpgt p1.s, p0/z, z3.s, #0
-; CHECK-NEXT:    mla z0.s, p0/m, z1.s, z2.s
-; CHECK-NEXT:    sel z0.s, p1, z0.s, z1.s
+; CHECK-NEXT:    cmpgt p0.s, p0/z, z3.s, #0
+; CHECK-NEXT:    mad z1.s, p0/m, z2.s, z0.s
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
@@ -2387,9 +2385,9 @@ define <vscale x 8 x i16> @mla_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i1
 ; CHECK-LABEL: mla_nxv8i16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    cmpgt p1.h, p0/z, z3.h, #0
-; CHECK-NEXT:    mla z0.h, p0/m, z1.h, z2.h
-; CHECK-NEXT:    sel z0.h, p1, z0.h, z1.h
+; CHECK-NEXT:    cmpgt p0.h, p0/z, z3.h, #0
+; CHECK-NEXT:    mad z1.h, p0/m, z2.h, z0.h
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
@@ -2403,9 +2401,9 @@ define <vscale x 16 x i8> @mla_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i
 ; CHECK-LABEL: mla_nxv16i8_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    cmpgt p1.b, p0/z, z3.b, #0
-; CHECK-NEXT:    mla z0.b, p0/m, z1.b, z2.b
-; CHECK-NEXT:    sel z0.b, p1, z0.b, z1.b
+; CHECK-NEXT:    cmpgt p0.b, p0/z, z3.b, #0
+; CHECK-NEXT:    mad z1.b, p0/m, z2.b, z0.b
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
@@ -2419,9 +2417,9 @@ define <vscale x 2 x i64> @mls_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i6
 ; CHECK-LABEL: mls_nxv2i64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    cmpgt p1.d, p0/z, z3.d, #0
-; CHECK-NEXT:    msb z0.d, p0/m, z1.d, z2.d
-; CHECK-NEXT:    sel z0.d, p1, z0.d, z1.d
+; CHECK-NEXT:    cmpgt p0.d, p0/z, z3.d, #0
+; CHECK-NEXT:    msb z1.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
@@ -2435,9 +2433,9 @@ define <vscale x 4 x i32> @mls_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i3
 ; CHECK-LABEL: mls_nxv4i32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    cmpgt p1.s, p0/z, z3.s, #0
-; CHECK-NEXT:    msb z0.s, p0/m, z1.s, z2.s
-; CHECK-NEXT:    sel z0.s, p1, z0.s, z1.s
+; CHECK-NEXT:    cmpgt p0.s, p0/z, z3.s, #0
+; CHECK-NEXT:    msb z1.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
@@ -2451,9 +2449,9 @@ define <vscale x 8 x i16> @mls_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i1
 ; CHECK-LABEL: mls_nxv8i16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    cmpgt p1.h, p0/z, z3.h, #0
-; CHECK-NEXT:    msb z0.h, p0/m, z1.h, z2.h
-; CHECK-NEXT:    sel z0.h, p1, z0.h, z1.h
+; CHECK-NEXT:    cmpgt p0.h, p0/z, z3.h, #0
+; CHECK-NEXT:    msb z1.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
@@ -2467,9 +2465,9 @@ define <vscale x 16 x i8> @mls_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i
 ; CHECK-LABEL: mls_nxv16i8_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    cmpgt p1.b, p0/z, z3.b, #0
-; CHECK-NEXT:    msb z0.b, p0/m, z1.b, z2.b
-; CHECK-NEXT:    sel z0.b, p1, z0.b, z1.b
+; CHECK-NEXT:    cmpgt p0.b, p0/z, z3.b, #0
+; CHECK-NEXT:    msb z1.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer

diff  --git a/llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll b/llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll
index 3b1b616a59154e..119ec16542ddc4 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll
@@ -581,9 +581,8 @@ define <vscale x 2 x i64> @mls_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i6
 ; CHECK-LABEL: mls_nxv2i64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    cmpgt p1.d, p0/z, z3.d, #0
-; CHECK-NEXT:    msb z1.d, p0/m, z0.d, z2.d
-; CHECK-NEXT:    mov z0.d, p1/m, z1.d
+; CHECK-NEXT:    cmpgt p0.d, p0/z, z3.d, #0
+; CHECK-NEXT:    msb z0.d, p0/m, z1.d, z2.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
@@ -597,9 +596,8 @@ define <vscale x 4 x i32> @mls_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i3
 ; CHECK-LABEL: mls_nxv4i32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    cmpgt p1.s, p0/z, z3.s, #0
-; CHECK-NEXT:    msb z1.s, p0/m, z0.s, z2.s
-; CHECK-NEXT:    mov z0.s, p1/m, z1.s
+; CHECK-NEXT:    cmpgt p0.s, p0/z, z3.s, #0
+; CHECK-NEXT:    msb z0.s, p0/m, z1.s, z2.s
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
@@ -613,9 +611,8 @@ define <vscale x 8 x i16> @mls_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i1
 ; CHECK-LABEL: mls_nxv8i16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    cmpgt p1.h, p0/z, z3.h, #0
-; CHECK-NEXT:    msb z1.h, p0/m, z0.h, z2.h
-; CHECK-NEXT:    mov z0.h, p1/m, z1.h
+; CHECK-NEXT:    cmpgt p0.h, p0/z, z3.h, #0
+; CHECK-NEXT:    msb z0.h, p0/m, z1.h, z2.h
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
@@ -629,9 +626,8 @@ define <vscale x 16 x i8> @mls_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i
 ; CHECK-LABEL: mls_nxv16i8_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    cmpgt p1.b, p0/z, z3.b, #0
-; CHECK-NEXT:    msb z1.b, p0/m, z0.b, z2.b
-; CHECK-NEXT:    mov z0.b, p1/m, z1.b
+; CHECK-NEXT:    cmpgt p0.b, p0/z, z3.b, #0
+; CHECK-NEXT:    msb z0.b, p0/m, z1.b, z2.b
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
@@ -1416,9 +1412,9 @@ define <vscale x 2 x i64> @mla_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i6
 ; CHECK-LABEL: mla_nxv2i64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    cmpgt p1.d, p0/z, z3.d, #0
-; CHECK-NEXT:    mla z0.d, p0/m, z1.d, z2.d
-; CHECK-NEXT:    sel z0.d, p1, z0.d, z1.d
+; CHECK-NEXT:    cmpgt p0.d, p0/z, z3.d, #0
+; CHECK-NEXT:    mad z1.d, p0/m, z2.d, z0.d
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
@@ -1432,9 +1428,9 @@ define <vscale x 4 x i32> @mla_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i3
 ; CHECK-LABEL: mla_nxv4i32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    cmpgt p1.s, p0/z, z3.s, #0
-; CHECK-NEXT:    mla z0.s, p0/m, z1.s, z2.s
-; CHECK-NEXT:    sel z0.s, p1, z0.s, z1.s
+; CHECK-NEXT:    cmpgt p0.s, p0/z, z3.s, #0
+; CHECK-NEXT:    mad z1.s, p0/m, z2.s, z0.s
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
@@ -1448,9 +1444,9 @@ define <vscale x 8 x i16> @mla_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i1
 ; CHECK-LABEL: mla_nxv8i16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    cmpgt p1.h, p0/z, z3.h, #0
-; CHECK-NEXT:    mla z0.h, p0/m, z1.h, z2.h
-; CHECK-NEXT:    sel z0.h, p1, z0.h, z1.h
+; CHECK-NEXT:    cmpgt p0.h, p0/z, z3.h, #0
+; CHECK-NEXT:    mad z1.h, p0/m, z2.h, z0.h
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
@@ -1464,9 +1460,9 @@ define <vscale x 16 x i8> @mla_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i
 ; CHECK-LABEL: mla_nxv16i8_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    cmpgt p1.b, p0/z, z3.b, #0
-; CHECK-NEXT:    mla z0.b, p0/m, z1.b, z2.b
-; CHECK-NEXT:    sel z0.b, p1, z0.b, z1.b
+; CHECK-NEXT:    cmpgt p0.b, p0/z, z3.b, #0
+; CHECK-NEXT:    mad z1.b, p0/m, z2.b, z0.b
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
@@ -1480,9 +1476,9 @@ define <vscale x 2 x i64> @mls_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i6
 ; CHECK-LABEL: mls_nxv2i64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    cmpgt p1.d, p0/z, z3.d, #0
-; CHECK-NEXT:    msb z0.d, p0/m, z1.d, z2.d
-; CHECK-NEXT:    sel z0.d, p1, z0.d, z1.d
+; CHECK-NEXT:    cmpgt p0.d, p0/z, z3.d, #0
+; CHECK-NEXT:    msb z1.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
@@ -1496,9 +1492,9 @@ define <vscale x 4 x i32> @mls_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i3
 ; CHECK-LABEL: mls_nxv4i32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    cmpgt p1.s, p0/z, z3.s, #0
-; CHECK-NEXT:    msb z0.s, p0/m, z1.s, z2.s
-; CHECK-NEXT:    sel z0.s, p1, z0.s, z1.s
+; CHECK-NEXT:    cmpgt p0.s, p0/z, z3.s, #0
+; CHECK-NEXT:    msb z1.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
@@ -1512,9 +1508,9 @@ define <vscale x 8 x i16> @mls_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i1
 ; CHECK-LABEL: mls_nxv8i16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    cmpgt p1.h, p0/z, z3.h, #0
-; CHECK-NEXT:    msb z0.h, p0/m, z1.h, z2.h
-; CHECK-NEXT:    sel z0.h, p1, z0.h, z1.h
+; CHECK-NEXT:    cmpgt p0.h, p0/z, z3.h, #0
+; CHECK-NEXT:    msb z1.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
@@ -1528,9 +1524,9 @@ define <vscale x 16 x i8> @mls_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i
 ; CHECK-LABEL: mls_nxv16i8_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    cmpgt p1.b, p0/z, z3.b, #0
-; CHECK-NEXT:    msb z0.b, p0/m, z1.b, z2.b
-; CHECK-NEXT:    sel z0.b, p1, z0.b, z1.b
+; CHECK-NEXT:    cmpgt p0.b, p0/z, z3.b, #0
+; CHECK-NEXT:    msb z1.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer

