[llvm] 58ceb81 - [SVE] Add SVE2 patterns for unpredicated multiply instructions

Danilo Carvalho Grael via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 23 10:22:40 PST 2020


Author: Danilo Carvalho Grael
Date: 2020-01-23T13:20:53-05:00
New Revision: 58ceb81d318b9a39f651e18ed68f8083e21719a0

URL: https://github.com/llvm/llvm-project/commit/58ceb81d318b9a39f651e18ed68f8083e21719a0
DIFF: https://github.com/llvm/llvm-project/commit/58ceb81d318b9a39f651e18ed68f8083e21719a0.diff

LOG: [SVE] Add SVE2 patterns for unpredicated multiply instructions

Summary:
Add patterns for SVE2 unpredicated multiply instructions:
- mul, smulh, umulh, pmul, sqdmulh, sqrdmulh
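A minimal IR sketch of what these patterns enable (the function name is illustrative; the behavior mirrors the tests added below): with -mattr=+sve2, a generic scalable-vector multiply now selects the unpredicated SVE2 form directly, instead of a ptrue plus predicated MUL_ZPmZ:

define <vscale x 4 x i32> @mul_example(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
  ; expected codegen with +sve2: mul z0.s, z0.s, z1.s
  %res = mul <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i32> %res
}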

Reviewers: sdesmalen, huntergr, efriedma, c-rhodes, kmclaughlin, rengolin

Subscribers: tschuett, hiraditya, rkruppe, psnobl, llvm-commits, amehsan

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D72799

Added: 
    llvm/test/CodeGen/AArch64/sve2-int-mul.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsAArch64.td
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/lib/Target/AArch64/SVEInstrFormats.td
    llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll
    llvm/test/CodeGen/AArch64/sve-int-mul-pred.ll

Removed: 
    llvm/test/CodeGen/AArch64/sve-neg-int-arith-imm-2.ll
    llvm/test/CodeGen/AArch64/sve-neg-int-arith-imm.ll


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 53cc89511151..f0036c17cc2a 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1192,6 +1192,10 @@ def int_aarch64_sve_add   : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_sub   : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_subr  : AdvSIMD_Pred2VectorArg_Intrinsic;
 
+def int_aarch64_sve_pmul       : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_sqdmulh    : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_sqrdmulh   : AdvSIMD_2VectorArg_Intrinsic;
+
 def int_aarch64_sve_mul        : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_smulh      : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_umulh      : AdvSIMD_Pred2VectorArg_Intrinsic;
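
The three new intrinsics are unpredicated (AdvSIMD_2VectorArg_Intrinsic), so unlike int_aarch64_sve_mul they take no governing predicate. A short usage sketch (the declaration and call match the test file added below; the function name is illustrative):

declare <vscale x 16 x i8> @llvm.aarch64.sve.pmul.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)

define <vscale x 16 x i8> @pmul_example(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
  ; expected codegen with +sve2: pmul z0.b, z0.b, z1.b
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.pmul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %res
}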

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index c2c14467f4a0..08a2f914ed47 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -158,11 +158,21 @@ let Predicates = [HasSVE] in {
   defm UMAX_ZI   : sve_int_arith_imm1_unsigned<0b01, "umax", umax>;
   defm UMIN_ZI   : sve_int_arith_imm1_unsigned<0b11, "umin", umin>;
 
-  defm MUL_ZI    : sve_int_arith_imm2<"mul", mul>;
-  defm MUL_ZPmZ   : sve_int_bin_pred_arit_2<0b000, "mul", int_aarch64_sve_mul>;
+  defm MUL_ZI     : sve_int_arith_imm2<"mul", mul>;
+  defm MUL_ZPmZ   : sve_int_bin_pred_arit_2<0b000, "mul",   int_aarch64_sve_mul>;
   defm SMULH_ZPmZ : sve_int_bin_pred_arit_2<0b010, "smulh", int_aarch64_sve_smulh>;
   defm UMULH_ZPmZ : sve_int_bin_pred_arit_2<0b011, "umulh", int_aarch64_sve_umulh>;
 
+  // Add an unpredicated alternative for the mul instruction.
+  def : Pat<(mul nxv16i8:$Op1, nxv16i8:$Op2),
+            (MUL_ZPmZ_B (PTRUE_B 31), $Op1, $Op2)>;
+  def : Pat<(mul nxv8i16:$Op1, nxv8i16:$Op2),
+            (MUL_ZPmZ_H (PTRUE_H 31), $Op1, $Op2)>;
+  def : Pat<(mul nxv4i32:$Op1, nxv4i32:$Op2),
+            (MUL_ZPmZ_S (PTRUE_S 31), $Op1, $Op2)>;
+  def : Pat<(mul nxv2i64:$Op1, nxv2i64:$Op2),
+            (MUL_ZPmZ_D (PTRUE_D 31), $Op1, $Op2)>;
+
   defm SDIV_ZPmZ  : sve_int_bin_pred_arit_2_div<0b100, "sdiv", int_aarch64_sve_sdiv>;
   defm UDIV_ZPmZ  : sve_int_bin_pred_arit_2_div<0b101, "udiv", int_aarch64_sve_udiv>;
   defm SDIVR_ZPmZ : sve_int_bin_pred_arit_2_div<0b110, "sdivr", int_aarch64_sve_sdivr>;
@@ -1405,15 +1415,32 @@ let Predicates = [HasSVE2] in {
   defm SQRDMULH_ZZZI : sve2_int_mul_by_indexed_elem<0b1101, "sqrdmulh">;
 
   // SVE2 signed saturating doubling multiply high (unpredicated)
-  defm SQDMULH_ZZZ  : sve2_int_mul<0b100, "sqdmulh">;
-  defm SQRDMULH_ZZZ : sve2_int_mul<0b101, "sqrdmulh">;
+  defm SQDMULH_ZZZ  : sve2_int_mul<0b100, "sqdmulh",  int_aarch64_sve_sqdmulh>;
+  defm SQRDMULH_ZZZ : sve2_int_mul<0b101, "sqrdmulh", int_aarch64_sve_sqrdmulh>;
 
   // SVE2 integer multiply vectors (unpredicated)
-  defm MUL_ZZZ    : sve2_int_mul<0b000, "mul">;
-  defm SMULH_ZZZ  : sve2_int_mul<0b010, "smulh">;
-  defm UMULH_ZZZ  : sve2_int_mul<0b011, "umulh">;
-  def  PMUL_ZZZ_B : sve2_int_mul<0b00, 0b001, "pmul", ZPR8>;
-
+  defm MUL_ZZZ    : sve2_int_mul<0b000,  "mul",   mul>;
+  defm SMULH_ZZZ  : sve2_int_mul<0b010,  "smulh", null_frag>;
+  defm UMULH_ZZZ  : sve2_int_mul<0b011,  "umulh", null_frag>;
+  defm PMUL_ZZZ   : sve2_int_mul_single<0b001, "pmul",  int_aarch64_sve_pmul>;
+
+  // Add patterns for the unpredicated versions of smulh and umulh.
+  def : Pat<(nxv16i8 (int_aarch64_sve_smulh (nxv16i1 (AArch64ptrue 31)), nxv16i8:$Op1, nxv16i8:$Op2)),
+            (SMULH_ZZZ_B $Op1, $Op2)>;
+  def : Pat<(nxv8i16 (int_aarch64_sve_smulh (nxv8i1 (AArch64ptrue 31)), nxv8i16:$Op1, nxv8i16:$Op2)),
+            (SMULH_ZZZ_H $Op1, $Op2)>;
+  def : Pat<(nxv4i32 (int_aarch64_sve_smulh (nxv4i1 (AArch64ptrue 31)), nxv4i32:$Op1, nxv4i32:$Op2)),
+            (SMULH_ZZZ_S $Op1, $Op2)>;
+  def : Pat<(nxv2i64 (int_aarch64_sve_smulh (nxv2i1 (AArch64ptrue 31)), nxv2i64:$Op1, nxv2i64:$Op2)),
+            (SMULH_ZZZ_D $Op1, $Op2)>;
+  def : Pat<(nxv16i8 (int_aarch64_sve_umulh (nxv16i1 (AArch64ptrue 31)), nxv16i8:$Op1, nxv16i8:$Op2)),
+            (UMULH_ZZZ_B $Op1, $Op2)>;
+  def : Pat<(nxv8i16 (int_aarch64_sve_umulh (nxv8i1 (AArch64ptrue 31)), nxv8i16:$Op1, nxv8i16:$Op2)),
+            (UMULH_ZZZ_H $Op1, $Op2)>;
+  def : Pat<(nxv4i32 (int_aarch64_sve_umulh (nxv4i1 (AArch64ptrue 31)), nxv4i32:$Op1, nxv4i32:$Op2)),
+            (UMULH_ZZZ_S $Op1, $Op2)>;
+  def : Pat<(nxv2i64 (int_aarch64_sve_umulh (nxv2i1 (AArch64ptrue 31)), nxv2i64:$Op1, nxv2i64:$Op2)),
+            (UMULH_ZZZ_D $Op1, $Op2)>;
   // SVE2 complex integer dot product (indexed)
   defm CDOT_ZZZI : sve2_cintx_dot_by_indexed_elem<"cdot">;
 

diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 3de94080fd43..0c5e8b8fec9d 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -2621,11 +2621,22 @@ class sve2_int_mul<bits<2> sz, bits<3> opc, string asm, ZPRRegOp zprty>
   let Inst{4-0}   = Zd;
 }
 
-multiclass sve2_int_mul<bits<3> opc, string asm> {
+multiclass sve2_int_mul<bits<3> opc, string asm, SDPatternOperator op> {
   def _B : sve2_int_mul<0b00, opc, asm, ZPR8>;
   def _H : sve2_int_mul<0b01, opc, asm, ZPR16>;
   def _S : sve2_int_mul<0b10, opc, asm, ZPR32>;
   def _D : sve2_int_mul<0b11, opc, asm, ZPR64>;
+
+  def : SVE_2_Op_Pat<nxv16i8, op, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_2_Op_Pat<nxv8i16, op, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_2_Op_Pat<nxv4i32, op, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_2_Op_Pat<nxv2i64, op, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
+}
+
+multiclass sve2_int_mul_single<bits<3> opc, string asm, SDPatternOperator op> {
+  def _B : sve2_int_mul<0b00, opc, asm, ZPR8>;
+
+  def : SVE_2_Op_Pat<nxv16i8, op, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
 }
 
 //===----------------------------------------------------------------------===//
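
With this change, each sve2_int_mul instantiation emits both the _B/_H/_S/_D instruction definitions and one SVE_2_Op_Pat per element type, so a two-operand intrinsic call selects the matching instruction directly. A sketch for sqdmulh, mirroring the new test file (the function name is illustrative):

declare <vscale x 2 x i64> @llvm.aarch64.sve.sqdmulh.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

define <vscale x 2 x i64> @sqdmulh_example(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
  ; expected codegen with +sve2: sqdmulh z0.d, z0.d, z1.d (selected via SQDMULH_ZZZ_D)
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmulh.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %res
}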

diff --git a/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll b/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll
index 451ddbbd8765..75965e3466ef 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll
@@ -446,3 +446,39 @@ define <vscale x 2 x i64> @mul_i64_pos(<vscale x 2 x i64> %a) {
   %res = mul <vscale x 2 x i64> %a, %splat
   ret <vscale x 2 x i64> %res
 }
+
+define <vscale x 8 x i16> @mul_i16_range(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: mul_i16_range
+; CHECK: mov w[[W:[0-9]+]], #255
+; CHECK-NEXT: mov z1.h, w[[W]]
+; CHECK: ptrue p0.h
+; CHECK-NEXT: mul z0.h, p0/m, z0.h, z1.h
+  %elt = insertelement <vscale x 8 x i16> undef, i16 255, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = mul <vscale x 8 x i16> %a, %splat
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @mul_i32_range(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: mul_i32_range
+; CHECK: mov w[[W:[0-9]+]], #255
+; CHECK-NEXT: mov z1.s, w[[W]]
+; CHECK: ptrue p0.s
+; CHECK-NEXT: mul z0.s, p0/m, z0.s, z1.s
+  %elt = insertelement <vscale x 4 x i32> undef, i32 255, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = mul <vscale x 4 x i32> %a, %splat
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @mul_i64_range(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: mul_i64_range
+; CHECK: mov w[[W:[0-9]+]], #255
+; CHECK-NEXT: mov z1.d, x[[W]]
+; CHECK: ptrue p0.d
+; CHECK-NEXT: mul z0.d, p0/m, z0.d, z1.d
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = mul <vscale x 2 x i64> %a, %splat
+  ret <vscale x 2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/AArch64/sve-int-mul-pred.ll b/llvm/test/CodeGen/AArch64/sve-int-mul-pred.ll
index 287a33729073..e16dbbfbad3a 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-mul-pred.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-mul-pred.ll
@@ -45,8 +45,8 @@ define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %
 ; CHECK: smulh z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT: ret
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smulh.nxv16i8(<vscale x 16 x i1> %pg,
-                                                               <vscale x 16 x i8> %a,
-                                                               <vscale x 16 x i8> %b)
+                                                                <vscale x 16 x i8> %a,
+                                                                <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %out
 }
 
@@ -55,8 +55,8 @@ define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %
 ; CHECK: smulh z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smulh.nxv8i16(<vscale x 8 x i1> %pg,
-                                                               <vscale x 8 x i16> %a,
-                                                               <vscale x 8 x i16> %b)
+                                                                <vscale x 8 x i16> %a,
+                                                                <vscale x 8 x i16> %b)
   ret <vscale x 8 x i16> %out
 }
 
@@ -65,8 +65,8 @@ define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %
 ; CHECK: smulh z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smulh.nxv4i32(<vscale x 4 x i1> %pg,
-                                                               <vscale x 4 x i32> %a,
-                                                               <vscale x 4 x i32> %b)
+                                                                <vscale x 4 x i32> %a,
+                                                                <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %out
 }
 
@@ -75,8 +75,8 @@ define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %
 ; CHECK: smulh z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smulh.nxv2i64(<vscale x 2 x i1> %pg,
-                                                               <vscale x 2 x i64> %a,
-                                                               <vscale x 2 x i64> %b)
+                                                                <vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i64> %b)
   ret <vscale x 2 x i64> %out
 }
 
@@ -85,8 +85,8 @@ define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %
 ; CHECK: umulh z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT: ret
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umulh.nxv16i8(<vscale x 16 x i1> %pg,
-                                                               <vscale x 16 x i8> %a,
-                                                               <vscale x 16 x i8> %b)
+                                                                <vscale x 16 x i8> %a,
+                                                                <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %out
 }
 
@@ -95,8 +95,8 @@ define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %
 ; CHECK: umulh z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umulh.nxv8i16(<vscale x 8 x i1> %pg,
-                                                               <vscale x 8 x i16> %a,
-                                                               <vscale x 8 x i16> %b)
+                                                                <vscale x 8 x i16> %a,
+                                                                <vscale x 8 x i16> %b)
   ret <vscale x 8 x i16> %out
 }
 
@@ -105,8 +105,8 @@ define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %
 ; CHECK: umulh z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umulh.nxv4i32(<vscale x 4 x i1> %pg,
-                                                               <vscale x 4 x i32> %a,
-                                                               <vscale x 4 x i32> %b)
+                                                                 <vscale x 4 x i32> %a,
+                                                                 <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %out
 }
 
@@ -115,8 +115,8 @@ define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %
 ; CHECK: umulh z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umulh.nxv2i64(<vscale x 2 x i1> %pg,
-                                                               <vscale x 2 x i64> %a,
-                                                               <vscale x 2 x i64> %b)
+                                                                 <vscale x 2 x i64> %a,
+                                                                 <vscale x 2 x i64> %b)
   ret <vscale x 2 x i64> %out
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-neg-int-arith-imm-2.ll b/llvm/test/CodeGen/AArch64/sve-neg-int-arith-imm-2.ll
deleted file mode 100644
index 56d65425dd9e..000000000000
--- a/llvm/test/CodeGen/AArch64/sve-neg-int-arith-imm-2.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: not llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s
-
-; Numbers smaller than -127 and greater than or equal to 127 are not allowed.
-; This should get lowered to a regular vector multiply and these tests should
-; be updated when those patterns are added.
-
-define <vscale x 2 x i64> @mul_i64_neg_1(<vscale x 2 x i64> %a) {
-  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = mul <vscale x 2 x i64> %a, %splat
-  ret <vscale x 2 x i64> %res
-}

diff --git a/llvm/test/CodeGen/AArch64/sve-neg-int-arith-imm.ll b/llvm/test/CodeGen/AArch64/sve-neg-int-arith-imm.ll
deleted file mode 100644
index 992b1581559d..000000000000
--- a/llvm/test/CodeGen/AArch64/sve-neg-int-arith-imm.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: not llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s
-
-; Numbers smaller than -127 and greater than or equal to 127 allowed for imm mul.
-; This should get lowered to a regular vector multiply and these tests should
-; be updated when those patterns are added.
-define <vscale x 2 x i64> @mul_i64_neg_1(<vscale x 2 x i64> %a) {
-  %elt = insertelement <vscale x 2 x i64> undef, i64 -130, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = mul <vscale x 2 x i64> %a, %splat
-  ret <vscale x 2 x i64> %res
-}

diff --git a/llvm/test/CodeGen/AArch64/sve2-int-mul.ll b/llvm/test/CodeGen/AArch64/sve2-int-mul.ll
new file mode 100644
index 000000000000..6e495b0989c2
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2-int-mul.ll
@@ -0,0 +1,324 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
+
+;
+; MUL with SPLAT
+;
+define <vscale x 8 x i16> @mul_i16_imm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: mul_i16_imm
+; CHECK: mov w[[W:[0-9]+]], #255
+; CHECK-NEXT: mov z1.h, w[[W]]
+; CHECK-NEXT: mul z0.h, z0.h, z1.h
+  %elt = insertelement <vscale x 8 x i16> undef, i16 255, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = mul <vscale x 8 x i16> %a, %splat
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @mul_i16_imm_neg(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: mul_i16_imm_neg
+; CHECK: mov w[[W:[0-9]+]], #-200
+; CHECK-NEXT: mov z1.h, w[[W]]
+; CHECK-NEXT: mul z0.h, z0.h, z1.h
+  %elt = insertelement <vscale x 8 x i16> undef, i16 -200, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = mul <vscale x 8 x i16> %a, %splat
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @mul_i32_imm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: mul_i32_imm
+; CHECK: mov w[[W:[0-9]+]], #255
+; CHECK-NEXT: mov z1.s, w[[W]]
+; CHECK-NEXT: mul z0.s, z0.s, z1.s
+  %elt = insertelement <vscale x 4 x i32> undef, i32 255, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = mul <vscale x 4 x i32> %a, %splat
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @mul_i32_imm_neg(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: mul_i32_imm_neg
+; CHECK: mov w[[W:[0-9]+]], #-200
+; CHECK-NEXT: mov z1.s, w[[W]]
+; CHECK-NEXT: mul z0.s, z0.s, z1.s
+  %elt = insertelement <vscale x 4 x i32> undef, i32 -200, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = mul <vscale x 4 x i32> %a, %splat
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @mul_i64_imm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: mul_i64_imm
+; CHECK: mov w[[X:[0-9]+]], #255
+; CHECK-NEXT: mov z1.d, x[[X]]
+; CHECK-NEXT: mul z0.d, z0.d, z1.d
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = mul <vscale x 2 x i64> %a, %splat
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @mul_i64_imm_neg(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: mul_i64_imm_neg
+; CHECK: mov x[[X:[0-9]+]], #-200
+; CHECK-NEXT: mov z1.d, x[[X]]
+; CHECK-NEXT: mul z0.d, z0.d, z1.d
+  %elt = insertelement <vscale x 2 x i64> undef, i64 -200, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = mul <vscale x 2 x i64> %a, %splat
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; MUL (vector, unpredicated)
+;
+define <vscale x 16 x i8> @mul_i8(<vscale x 16 x i8> %a,
+                                  <vscale x 16 x i8> %b) {
+; CHECK-LABEL: mul_i8
+; CHECK: mul z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %res = mul <vscale x 16 x i8> %a, %b
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @mul_i16(<vscale x 8 x i16> %a,
+                                  <vscale x 8 x i16> %b) {
+; CHECK-LABEL: mul_i16
+; CHECK: mul z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %res = mul <vscale x 8 x i16> %a, %b
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @mul_i32(<vscale x 4 x i32> %a,
+                                  <vscale x 4 x i32> %b) {
+; CHECK-LABEL: mul_i32
+; CHECK: mul z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %res = mul <vscale x 4 x i32> %a, %b
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @mul_i64(<vscale x 2 x i64> %a,
+                                  <vscale x 2 x i64> %b) {
+; CHECK-LABEL: mul_i64
+; CHECK: mul z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %res = mul <vscale x 2 x i64> %a, %b
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; SMULH (vector, unpredicated)
+;
+define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i8> %a,
+                                    <vscale x 16 x i8> %b) {
+; CHECK-LABEL: smulh_i8
+; CHECK: smulh z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %sel = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.smulh.nxv16i8(<vscale x 16 x i1> %sel, <vscale x 16 x i8> %a,
+                                                                 <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i16> %a,
+                                     <vscale x 8 x i16> %b) {
+; CHECK-LABEL: smulh_i16
+; CHECK: smulh z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %sel = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.smulh.nxv8i16(<vscale x 8 x i1> %sel, <vscale x 8 x i16> %a,
+                                                                 <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i32> %a,
+                                     <vscale x 4 x i32> %b) {
+; CHECK-LABEL: smulh_i32
+; CHECK: smulh z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %sel = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smulh.nxv4i32(<vscale x 4 x i1> %sel, <vscale x 4 x i32> %a,
+                                                                 <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i64> %a,
+                                     <vscale x 2 x i64> %b) {
+; CHECK-LABEL: smulh_i64
+; CHECK: smulh z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %sel = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smulh.nxv2i64(<vscale x 2 x i1> %sel, <vscale x 2 x i64> %a,
+                                                                 <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; UMULH (vector, unpredicated)
+;
+define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i8> %a,
+                                    <vscale x 16 x i8> %b) {
+; CHECK-LABEL: umulh_i8
+; CHECK: umulh z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %sel = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.umulh.nxv16i8(<vscale x 16 x i1> %sel, <vscale x 16 x i8> %a,
+                                                                 <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i16> %a,
+                                     <vscale x 8 x i16> %b) {
+; CHECK-LABEL: umulh_i16
+; CHECK: umulh z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %sel = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.umulh.nxv8i16(<vscale x 8 x i1> %sel, <vscale x 8 x i16> %a,
+                                                                 <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i32> %a,
+                                     <vscale x 4 x i32> %b) {
+; CHECK-LABEL: umulh_i32
+; CHECK: umulh z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %sel = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umulh.nxv4i32(<vscale x 4 x i1> %sel, <vscale x 4 x i32> %a,
+                                                                 <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i64> %a,
+                                     <vscale x 2 x i64> %b) {
+; CHECK-LABEL: umulh_i64
+; CHECK: umulh z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %sel = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umulh.nxv2i64(<vscale x 2 x i1> %sel, <vscale x 2 x i64> %a,
+                                                                 <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; PMUL (vector, unpredicated)
+;
+define <vscale x 16 x i8> @pmul_i8(<vscale x 16 x i8> %a,
+                                   <vscale x 16 x i8> %b) {
+; CHECK-LABEL: pmul_i8
+; CHECK: pmul z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.pmul.nxv16i8(<vscale x 16 x i8> %a,
+                                                                <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %res
+}
+
+;
+; SQDMULH (vector, unpredicated)
+;
+define <vscale x 16 x i8> @sqdmulh_i8(<vscale x 16 x i8> %a,
+                                      <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sqdmulh_i8
+; CHECK: sqdmulh z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqdmulh.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @sqdmulh_i16(<vscale x 8 x i16> %a,
+                                       <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sqdmulh_i16
+; CHECK: sqdmulh z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmulh.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @sqdmulh_i32(<vscale x 4 x i32> %a,
+                                       <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sqdmulh_i32
+; CHECK: sqdmulh z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmulh.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sqdmulh_i64(<vscale x 2 x i64> %a,
+                                       <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sqdmulh_i64
+; CHECK: sqdmulh z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmulh.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; SQRDMULH (vector, unpredicated)
+;
+define <vscale x 16 x i8> @sqrdmulh_i8(<vscale x 16 x i8> %a,
+                                       <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sqrdmulh_i8
+; CHECK: sqrdmulh z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrdmulh.nxv16i8(<vscale x 16 x i8> %a,
+                                                                    <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @sqrdmulh_i16(<vscale x 8 x i16> %a,
+                                        <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sqrdmulh_i16
+; CHECK: sqrdmulh z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmulh.nxv8i16(<vscale x 8 x i16> %a,
+                                                                    <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @sqrdmulh_i32(<vscale x 4 x i32> %a,
+                                        <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sqrdmulh_i32
+; CHECK: sqrdmulh z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrdmulh.nxv4i32(<vscale x 4 x i32> %a,
+                                                                    <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sqrdmulh_i64(<vscale x 2 x i64> %a,
+                                        <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sqrdmulh_i64
+; CHECK: sqrdmulh z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmulh.nxv2i64(<vscale x 2 x i64> %a,
+                                                                    <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %res
+}
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32)
+declare <vscale x 16 x  i8> @llvm.aarch64.sve.smulh.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x  i8>, <vscale x 16 x  i8>)
+declare <vscale x  8 x i16> @llvm.aarch64.sve.smulh.nxv8i16(<vscale x  8 x i1>, <vscale x  8 x i16>, <vscale x  8 x i16>)
+declare <vscale x  4 x i32> @llvm.aarch64.sve.smulh.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
+declare <vscale x  2 x i64> @llvm.aarch64.sve.smulh.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)
+declare <vscale x 16 x  i8> @llvm.aarch64.sve.umulh.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x  i8>, <vscale x 16 x  i8>)
+declare <vscale x  8 x i16> @llvm.aarch64.sve.umulh.nxv8i16(<vscale x  8 x i1>, <vscale x  8 x i16>, <vscale x  8 x i16>)
+declare <vscale x  4 x i32> @llvm.aarch64.sve.umulh.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
+declare <vscale x  2 x i64> @llvm.aarch64.sve.umulh.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.pmul.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqdmulh.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqdmulh.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqdmulh.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqdmulh.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrdmulh.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmulh.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqrdmulh.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmulh.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)


        

