[llvm] e9cd197 - [LoongArch] Support MULHS/MULHU with lsx/lasx

via llvm-commits llvm-commits at lists.llvm.org
Sun Dec 3 19:00:12 PST 2023


Author: wanglei
Date: 2023-12-04T10:58:05+08:00
New Revision: e9cd197d15300f186a5a32092103add65fbd3f50

URL: https://github.com/llvm/llvm-project/commit/e9cd197d15300f186a5a32092103add65fbd3f50
DIFF: https://github.com/llvm/llvm-project/commit/e9cd197d15300f186a5a32092103add65fbd3f50.diff

LOG: [LoongArch] Support MULHS/MULHU with lsx/lasx

Mark MULHS/MULHU nodes as legal and add the necessary selection patterns.
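
LLVM IR has no dedicated multiply-high instruction, so the operation is
expressed as a widening multiply followed by a shift of the high half;
DAGCombine folds that idiom into ISD::MULHS/ISD::MULHU nodes, which this
change lets the LoongArch backend select directly. A minimal sketch of the
idiom for one element type (a hypothetical reduced case, not taken from the
patch):

  ; With +lsx this should now select to a single vmuh.w instead of being
  ; expanded through widening multiplies.
  define <4 x i32> @mulhs_sketch(<4 x i32> %a, <4 x i32> %b) {
    %as = sext <4 x i32> %a to <4 x i64>                      ; widen operands
    %bs = sext <4 x i32> %b to <4 x i64>
    %m = mul <4 x i64> %as, %bs                               ; full products
    %h = ashr <4 x i64> %m, <i64 32, i64 32, i64 32, i64 32>  ; keep high halves
    %r = trunc <4 x i64> %h to <4 x i32>
    ret <4 x i32> %r
  }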

Added: 
    llvm/test/CodeGen/LoongArch/lasx/mulh.ll
    llvm/test/CodeGen/LoongArch/lsx/mulh.ll

Modified: 
    llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
    llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 9f8b2fd07a48d..d297d115d59cb 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -257,6 +257,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
       setOperationAction({ISD::AND, ISD::OR, ISD::XOR}, VT, Legal);
       setOperationAction({ISD::SHL, ISD::SRA, ISD::SRL}, VT, Legal);
       setOperationAction({ISD::CTPOP, ISD::CTLZ}, VT, Legal);
+      setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Legal);
     }
     for (MVT VT : {MVT::v4f32, MVT::v2f64}) {
       setOperationAction({ISD::FADD, ISD::FSUB}, VT, Legal);
@@ -287,6 +288,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
       setOperationAction({ISD::AND, ISD::OR, ISD::XOR}, VT, Legal);
       setOperationAction({ISD::SHL, ISD::SRA, ISD::SRL}, VT, Legal);
       setOperationAction({ISD::CTPOP, ISD::CTLZ}, VT, Legal);
+      setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Legal);
     }
     for (MVT VT : {MVT::v8f32, MVT::v4f64}) {
       setOperationAction({ISD::FADD, ISD::FSUB}, VT, Legal);

diff --git a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
index 960ac627578cd..240f28b0dc5ae 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
@@ -1217,6 +1217,10 @@ defm : PatXrUimm5<umin, "XVMINI">;
 // XVMUL_{B/H/W/D}
 defm : PatXrXr<mul, "XVMUL">;
 
+// XVMUH_{B/H/W/D}[U]
+defm : PatXrXr<mulhs, "XVMUH">;
+defm : PatXrXrU<mulhu, "XVMUH">;
+
 // XVMADD_{B/H/W/D}
 defm : PatXrXrXr<muladd, "XVMADD">;
 // XVMSUB_{B/H/W/D}

diff --git a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
index 3480ade9eebf9..fb4726c530b55 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
@@ -1294,6 +1294,10 @@ defm : PatVrUimm5<umin, "VMINI">;
 // VMUL_{B/H/W/D}
 defm : PatVrVr<mul, "VMUL">;
 
+// VMUH_{B/H/W/D}[U]
+defm : PatVrVr<mulhs, "VMUH">;
+defm : PatVrVrU<mulhu, "VMUH">;
+
 // VMADD_{B/H/W/D}
 defm : PatVrVrVr<muladd, "VMADD">;
 // VMSUB_{B/H/W/D}

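Beyond the explicit widen-multiply-shift sequences exercised by the tests
below, multiply-high nodes also arise when the DAG expands vector division by
a constant, so those cases pick up the new vmuh/xvmuh patterns as well. A
hypothetical example (not part of this patch):

  ; udiv by a constant is strength-reduced through a multiply-high
  ; (magic-number expansion), which can now use vmuh.wu under +lsx.
  define <4 x i32> @udiv_by_7(<4 x i32> %a) {
    %r = udiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
    ret <4 x i32> %r
  }
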
diff --git a/llvm/test/CodeGen/LoongArch/lasx/mulh.ll b/llvm/test/CodeGen/LoongArch/lasx/mulh.ll
new file mode 100644
index 0000000000000..aac711a4a371c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/mulh.ll
@@ -0,0 +1,162 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @mulhs_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhs_v32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvmuh.b $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = load <32 x i8>, ptr %a1
+  %v0s = sext <32 x i8> %v0 to <32 x i16>
+  %v1s = sext <32 x i8> %v1 to <32 x i16>
+  %m = mul <32 x i16> %v0s, %v1s
+  %s = ashr <32 x i16> %m, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %v2 = trunc <32 x i16> %s to <32 x i8>
+  store <32 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @mulhu_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhu_v32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvmuh.bu $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = load <32 x i8>, ptr %a1
+  %v0z = zext <32 x i8> %v0 to <32 x i16>
+  %v1z = zext <32 x i8> %v1 to <32 x i16>
+  %m = mul <32 x i16> %v0z, %v1z
+  %s = lshr <32 x i16> %m, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %v2 = trunc <32 x i16> %s to <32 x i8>
+  store <32 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @mulhs_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhs_v16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvmuh.h $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = load <16 x i16>, ptr %a1
+  %v0s = sext <16 x i16> %v0 to <16 x i32>
+  %v1s = sext <16 x i16> %v1 to <16 x i32>
+  %m = mul <16 x i32> %v0s, %v1s
+  %s = ashr <16 x i32> %m, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %v2 = trunc <16 x i32> %s to <16 x i16>
+  store <16 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @mulhu_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhu_v16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvmuh.hu $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = load <16 x i16>, ptr %a1
+  %v0z = zext <16 x i16> %v0 to <16 x i32>
+  %v1z = zext <16 x i16> %v1 to <16 x i32>
+  %m = mul <16 x i32> %v0z, %v1z
+  %s = lshr <16 x i32> %m, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %v2 = trunc <16 x i32> %s to <16 x i16>
+  store <16 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @mulhs_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhs_v8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvmuh.w $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = load <8 x i32>, ptr %a1
+  %v0s = sext <8 x i32> %v0 to <8 x i64>
+  %v1s = sext <8 x i32> %v1 to <8 x i64>
+  %m = mul <8 x i64> %v0s, %v1s
+  %s = ashr <8 x i64> %m, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  %v2 = trunc <8 x i64> %s to <8 x i32>
+  store <8 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @mulhu_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhu_v8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvmuh.wu $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = load <8 x i32>, ptr %a1
+  %v0z = zext <8 x i32> %v0 to <8 x i64>
+  %v1z = zext <8 x i32> %v1 to <8 x i64>
+  %m = mul <8 x i64> %v0z, %v1z
+  %s = lshr <8 x i64> %m, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  %v2 = trunc <8 x i64> %s to <8 x i32>
+  store <8 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @mulhs_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhs_v4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvmuh.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = load <4 x i64>, ptr %a1
+  %v0s = sext <4 x i64> %v0 to <4 x i128>
+  %v1s = sext <4 x i64> %v1 to <4 x i128>
+  %m = mul <4 x i128> %v0s, %v1s
+  %s = ashr <4 x i128> %m, <i128 64, i128 64, i128 64, i128 64>
+  %v2 = trunc <4 x i128> %s to <4 x i64>
+  store <4 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @mulhu_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhu_v4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvmuh.du $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = load <4 x i64>, ptr %a1
+  %v0z = zext <4 x i64> %v0 to <4 x i128>
+  %v1z = zext <4 x i64> %v1 to <4 x i128>
+  %m = mul <4 x i128> %v0z, %v1z
+  %s = lshr <4 x i128> %m, <i128 64, i128 64, i128 64, i128 64>
+  %v2 = trunc <4 x i128> %s to <4 x i64>
+  store <4 x i64> %v2, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/mulh.ll b/llvm/test/CodeGen/LoongArch/lsx/mulh.ll
new file mode 100644
index 0000000000000..e1388f00e355f
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/mulh.ll
@@ -0,0 +1,162 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @mulhs_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhs_v16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmuh.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = load <16 x i8>, ptr %a1
+  %v0s = sext <16 x i8> %v0 to <16 x i16>
+  %v1s = sext <16 x i8> %v1 to <16 x i16>
+  %m = mul <16 x i16> %v0s, %v1s
+  %s = ashr <16 x i16> %m, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %v2 = trunc <16 x i16> %s to <16 x i8>
+  store <16 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @mulhu_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhu_v16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmuh.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = load <16 x i8>, ptr %a1
+  %v0z = zext <16 x i8> %v0 to <16 x i16>
+  %v1z = zext <16 x i8> %v1 to <16 x i16>
+  %m = mul <16 x i16> %v0z, %v1z
+  %s = lshr <16 x i16> %m, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %v2 = trunc <16 x i16> %s to <16 x i8>
+  store <16 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @mulhs_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhs_v8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmuh.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = load <8 x i16>, ptr %a1
+  %v0s = sext <8 x i16> %v0 to <8 x i32>
+  %v1s = sext <8 x i16> %v1 to <8 x i32>
+  %m = mul <8 x i32> %v0s, %v1s
+  %s = ashr <8 x i32> %m, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %v2 = trunc <8 x i32> %s to <8 x i16>
+  store <8 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @mulhu_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhu_v8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmuh.hu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = load <8 x i16>, ptr %a1
+  %v0z = zext <8 x i16> %v0 to <8 x i32>
+  %v1z = zext <8 x i16> %v1 to <8 x i32>
+  %m = mul <8 x i32> %v0z, %v1z
+  %s = lshr <8 x i32> %m, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %v2 = trunc <8 x i32> %s to <8 x i16>
+  store <8 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @mulhs_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhs_v4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmuh.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = load <4 x i32>, ptr %a1
+  %v0s = sext <4 x i32> %v0 to <4 x i64>
+  %v1s = sext <4 x i32> %v1 to <4 x i64>
+  %m = mul <4 x i64> %v0s, %v1s
+  %s = ashr <4 x i64> %m, <i64 32, i64 32, i64 32, i64 32>
+  %v2 = trunc <4 x i64> %s to <4 x i32>
+  store <4 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @mulhu_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhu_v4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmuh.wu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = load <4 x i32>, ptr %a1
+  %v0z = zext <4 x i32> %v0 to <4 x i64>
+  %v1z = zext <4 x i32> %v1 to <4 x i64>
+  %m = mul <4 x i64> %v0z, %v1z
+  %s = lshr <4 x i64> %m, <i64 32, i64 32, i64 32, i64 32>
+  %v2 = trunc <4 x i64> %s to <4 x i32>
+  store <4 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @mulhs_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhs_v2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmuh.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = load <2 x i64>, ptr %a1
+  %v0s = sext <2 x i64> %v0 to <2 x i128>
+  %v1s = sext <2 x i64> %v1 to <2 x i128>
+  %m = mul <2 x i128> %v0s, %v1s
+  %s = ashr <2 x i128> %m, <i128 64, i128 64>
+  %v2 = trunc <2 x i128> %s to <2 x i64>
+  store <2 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @mulhu_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mulhu_v2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmuh.du $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = load <2 x i64>, ptr %a1
+  %v0z = zext <2 x i64> %v0 to <2 x i128>
+  %v1z = zext <2 x i64> %v1 to <2 x i128>
+  %m = mul <2 x i128> %v0z, %v1z
+  %s = lshr <2 x i128> %m, <i128 64, i128 64>
+  %v2 = trunc <2 x i128> %s to <2 x i64>
+  store <2 x i64> %v2, ptr %res
+  ret void
+}
