[llvm] [LoongArch] make ABDS/ABDU legal for lsx/lasx (PR #134190)

via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 2 19:05:57 PDT 2025


llvmbot wrote:



@llvm/pr-subscribers-backend-loongarch

Author: None (tangaac)




---

Patch is 40.09 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/134190.diff


5 Files Affected:

- (modified) llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp (+4) 
- (modified) llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td (+4) 
- (modified) llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td (+4) 
- (modified) llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll (+37-114) 
- (modified) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll (+37-114) 


``````````diff
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 900775eedfa7b..edb74eeb84b16 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -272,6 +272,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
           {ISD::SETNE, ISD::SETGE, ISD::SETGT, ISD::SETUGE, ISD::SETUGT}, VT,
           Expand);
       setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
+      setOperationAction(ISD::ABDS, VT, Legal);
+      setOperationAction(ISD::ABDU, VT, Legal);
     }
     for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
       setOperationAction(ISD::BITREVERSE, VT, Custom);
@@ -336,6 +338,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
           {ISD::SETNE, ISD::SETGE, ISD::SETGT, ISD::SETUGE, ISD::SETUGT}, VT,
           Expand);
       setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
+      setOperationAction(ISD::ABDS, VT, Legal);
+      setOperationAction(ISD::ABDU, VT, Legal);
     }
     for (MVT VT : {MVT::v32i8, MVT::v16i16, MVT::v8i32})
       setOperationAction(ISD::BITREVERSE, VT, Custom);
diff --git a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
index 7022fddf34100..415aa5f7820d2 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
@@ -1817,6 +1817,10 @@ def : Pat<(vt (concat_vectors LSX128:$vd, LSX128:$vj)),
           (XVPERMI_Q (SUBREG_TO_REG (i64 0), LSX128:$vd, sub_128),
                      (SUBREG_TO_REG (i64 0), LSX128:$vj, sub_128), 2)>;
 
+// XVABSD_{B/H/W/D}[U]
+defm : PatXrXr<abds, "XVABSD">;
+defm : PatXrXrU<abdu, "XVABSD">;
+
 } // Predicates = [HasExtLASX]
 
 /// Intrinsic pattern
diff --git a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
index e37de4f545a2a..3e7aee04c11f8 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
@@ -1973,6 +1973,10 @@ def : Pat<(f32 f32imm_vldi:$in),
 def : Pat<(f64 f64imm_vldi:$in),
           (f64 (EXTRACT_SUBREG (VLDI (to_f64imm_vldi f64imm_vldi:$in)), sub_64))>;
 
+// VABSD_{B/H/W/D}[U]
+defm : PatVrVr<abds, "VABSD">;
+defm : PatVrVrU<abdu, "VABSD">;
+
 } // Predicates = [HasExtLSX]
 
 /// Intrinsic pattern
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
index bd5b16f5147a2..c5df9f8420837 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
@@ -1,7 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s
 
-;; TODO: Currently LoongArch generates sub-optimal code for these cases
 ;; 1. trunc(abs(sub(sext(a),sext(b)))) -> abds(a,b) or abdu(a,b)
 ;; 2. abs(sub_nsw(x, y)) -> abds(a,b)
 ;; 3. sub(smax(a,b),smin(a,b)) -> abds(a,b) or abdu(a,b)
@@ -9,16 +8,12 @@
 ;; 5. sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abds(a,b) or abdu(a,b)
 ;;
 ;; abds / abdu can be lowered to xvabsd.{b/h/w/d} / xvabsd.{b/h/w/d}u  instruction.
-;;
-;; Later patch will address it.
 
 ;; trunc(abs(sub(sext(a),sext(b)))) -> abds(a,b)
 define <32 x i8> @xvabsd_b(<32 x i8> %a, <32 x i8> %b) {
 ; CHECK-LABEL: xvabsd_b:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.b $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.b $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a.sext = sext <32 x i8> %a to <32 x i16>
   %b.sext = sext <32 x i8> %b to <32 x i16>
@@ -31,9 +26,7 @@ define <32 x i8> @xvabsd_b(<32 x i8> %a, <32 x i8> %b) {
 define <16 x i16> @xvabsd_h(<16 x i16> %a, <16 x i16> %b) {
 ; CHECK-LABEL: xvabsd_h:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.h $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.h $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a.sext = sext <16 x i16> %a to <16 x i32>
   %b.sext = sext <16 x i16> %b to <16 x i32>
@@ -46,9 +39,7 @@ define <16 x i16> @xvabsd_h(<16 x i16> %a, <16 x i16> %b) {
 define <8 x i32> @xvabsd_w(<8 x i32> %a, <8 x i32> %b) {
 ; CHECK-LABEL: xvabsd_w:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.w $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.w $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a.sext = sext <8 x i32> %a to <8 x i64>
   %b.sext = sext <8 x i32> %b to <8 x i64>
@@ -61,9 +52,7 @@ define <8 x i32> @xvabsd_w(<8 x i32> %a, <8 x i32> %b) {
 define <4 x i64> @xvabsd_d(<4 x i64> %a, <4 x i64> %b) {
 ; CHECK-LABEL: xvabsd_d:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.d $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.d $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a.sext = sext <4 x i64> %a to <4 x i128>
   %b.sext = sext <4 x i64> %b to <4 x i128>
@@ -76,9 +65,7 @@ define <4 x i64> @xvabsd_d(<4 x i64> %a, <4 x i64> %b) {
 define <32 x i8> @xvabsd_bu(<32 x i8> %a, <32 x i8> %b) {
 ; CHECK-LABEL: xvabsd_bu:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.bu $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.bu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a.zext = zext <32 x i8> %a to <32 x i16>
   %b.zext = zext <32 x i8> %b to <32 x i16>
@@ -91,9 +78,7 @@ define <32 x i8> @xvabsd_bu(<32 x i8> %a, <32 x i8> %b) {
 define <16 x i16> @xvabsd_hu(<16 x i16> %a, <16 x i16> %b) {
 ; CHECK-LABEL: xvabsd_hu:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.hu $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.hu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a.zext = zext <16 x i16> %a to <16 x i32>
   %b.zext = zext <16 x i16> %b to <16 x i32>
@@ -106,9 +91,7 @@ define <16 x i16> @xvabsd_hu(<16 x i16> %a, <16 x i16> %b) {
 define <8 x i32> @xvabsd_wu(<8 x i32> %a, <8 x i32> %b) {
 ; CHECK-LABEL: xvabsd_wu:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.wu $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.wu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a.zext = zext <8 x i32> %a to <8 x i64>
   %b.zext = zext <8 x i32> %b to <8 x i64>
@@ -121,9 +104,7 @@ define <8 x i32> @xvabsd_wu(<8 x i32> %a, <8 x i32> %b) {
 define <4 x i64> @xvabsd_du(<4 x i64> %a, <4 x i64> %b) {
 ; CHECK-LABEL: xvabsd_du:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.du $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.du $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a.zext = zext <4 x i64> %a to <4 x i128>
   %b.zext = zext <4 x i64> %b to <4 x i128>
@@ -137,9 +118,7 @@ define <4 x i64> @xvabsd_du(<4 x i64> %a, <4 x i64> %b) {
 define <32 x i8> @xvabsd_b_nsw(<32 x i8> %a, <32 x i8> %b) {
 ; CHECK-LABEL: xvabsd_b_nsw:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvneg.b $xr1, $xr0
-; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvabsd.b $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %sub = sub nsw <32 x i8> %a, %b
   %abs = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %sub, i1 true)
@@ -149,9 +128,7 @@ define <32 x i8> @xvabsd_b_nsw(<32 x i8> %a, <32 x i8> %b) {
 define <16 x i16> @xvabsd_h_nsw(<16 x i16> %a, <16 x i16> %b) {
 ; CHECK-LABEL: xvabsd_h_nsw:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvneg.h $xr1, $xr0
-; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvabsd.h $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %sub = sub nsw <16 x i16> %a, %b
   %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
@@ -161,9 +138,7 @@ define <16 x i16> @xvabsd_h_nsw(<16 x i16> %a, <16 x i16> %b) {
 define <8 x i32> @xvabsd_w_nsw(<8 x i32> %a, <8 x i32> %b) {
 ; CHECK-LABEL: xvabsd_w_nsw:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvneg.w $xr1, $xr0
-; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvabsd.w $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %sub = sub nsw <8 x i32> %a, %b
   %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
@@ -173,9 +148,7 @@ define <8 x i32> @xvabsd_w_nsw(<8 x i32> %a, <8 x i32> %b) {
 define <4 x i64> @xvabsd_d_nsw(<4 x i64> %a, <4 x i64> %b) {
 ; CHECK-LABEL: xvabsd_d_nsw:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvneg.d $xr1, $xr0
-; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvabsd.d $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %sub = sub nsw <4 x i64> %a, %b
   %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
@@ -186,9 +159,7 @@ define <4 x i64> @xvabsd_d_nsw(<4 x i64> %a, <4 x i64> %b) {
 define <32 x i8> @maxmin_b(<32 x i8> %0, <32 x i8> %1) {
 ; CHECK-LABEL: maxmin_b:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.b $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.b $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a = tail call <32 x i8> @llvm.smax.v32i8(<32 x i8> %0, <32 x i8> %1)
   %b = tail call <32 x i8> @llvm.smin.v32i8(<32 x i8> %0, <32 x i8> %1)
@@ -199,9 +170,7 @@ define <32 x i8> @maxmin_b(<32 x i8> %0, <32 x i8> %1) {
 define <16 x i16> @maxmin_h(<16 x i16> %0, <16 x i16> %1) {
 ; CHECK-LABEL: maxmin_h:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.h $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.h $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> %0, <16 x i16> %1)
   %b = tail call <16 x i16> @llvm.smin.v16i16(<16 x i16> %0, <16 x i16> %1)
@@ -212,9 +181,7 @@ define <16 x i16> @maxmin_h(<16 x i16> %0, <16 x i16> %1) {
 define <8 x i32> @maxmin_w(<8 x i32> %0, <8 x i32> %1) {
 ; CHECK-LABEL: maxmin_w:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.w $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.w $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a = tail call <8 x i32> @llvm.smax.v8i32(<8 x i32> %0, <8 x i32> %1)
   %b = tail call <8 x i32> @llvm.smin.v8i32(<8 x i32> %0, <8 x i32> %1)
@@ -225,9 +192,7 @@ define <8 x i32> @maxmin_w(<8 x i32> %0, <8 x i32> %1) {
 define <4 x i64> @maxmin_d(<4 x i64> %0, <4 x i64> %1) {
 ; CHECK-LABEL: maxmin_d:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.d $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.d $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a = tail call <4 x i64> @llvm.smax.v4i64(<4 x i64> %0, <4 x i64> %1)
   %b = tail call <4 x i64> @llvm.smin.v4i64(<4 x i64> %0, <4 x i64> %1)
@@ -238,9 +203,7 @@ define <4 x i64> @maxmin_d(<4 x i64> %0, <4 x i64> %1) {
 define <32 x i8> @maxmin_bu(<32 x i8> %0, <32 x i8> %1) {
 ; CHECK-LABEL: maxmin_bu:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.bu $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.bu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a = tail call <32 x i8> @llvm.umax.v32i8(<32 x i8> %0, <32 x i8> %1)
   %b = tail call <32 x i8> @llvm.umin.v32i8(<32 x i8> %0, <32 x i8> %1)
@@ -251,9 +214,7 @@ define <32 x i8> @maxmin_bu(<32 x i8> %0, <32 x i8> %1) {
 define <16 x i16> @maxmin_hu(<16 x i16> %0, <16 x i16> %1) {
 ; CHECK-LABEL: maxmin_hu:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.hu $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.hu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a = tail call <16 x i16> @llvm.umax.v16i16(<16 x i16> %0, <16 x i16> %1)
   %b = tail call <16 x i16> @llvm.umin.v16i16(<16 x i16> %0, <16 x i16> %1)
@@ -264,9 +225,7 @@ define <16 x i16> @maxmin_hu(<16 x i16> %0, <16 x i16> %1) {
 define <8 x i32> @maxmin_wu(<8 x i32> %0, <8 x i32> %1) {
 ; CHECK-LABEL: maxmin_wu:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.wu $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.wu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a = tail call <8 x i32> @llvm.umax.v8i32(<8 x i32> %0, <8 x i32> %1)
   %b = tail call <8 x i32> @llvm.umin.v8i32(<8 x i32> %0, <8 x i32> %1)
@@ -277,9 +236,7 @@ define <8 x i32> @maxmin_wu(<8 x i32> %0, <8 x i32> %1) {
 define <4 x i64> @maxmin_du(<4 x i64> %0, <4 x i64> %1) {
 ; CHECK-LABEL: maxmin_du:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.du $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.du $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a = tail call <4 x i64> @llvm.umax.v4i64(<4 x i64> %0, <4 x i64> %1)
   %b = tail call <4 x i64> @llvm.umin.v4i64(<4 x i64> %0, <4 x i64> %1)
@@ -290,9 +247,7 @@ define <4 x i64> @maxmin_du(<4 x i64> %0, <4 x i64> %1) {
 define <32 x i8> @maxmin_bu_com1(<32 x i8> %0, <32 x i8> %1) {
 ; CHECK-LABEL: maxmin_bu_com1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.bu $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.bu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %a = tail call <32 x i8> @llvm.umax.v32i8(<32 x i8> %0, <32 x i8> %1)
   %b = tail call <32 x i8> @llvm.umin.v32i8(<32 x i8> %1, <32 x i8> %0)
@@ -304,9 +259,7 @@ define <32 x i8> @maxmin_bu_com1(<32 x i8> %0, <32 x i8> %1) {
 define <32 x i8> @xvabsd_b_cmp(<32 x i8> %a, <32 x i8> %b) nounwind {
 ; CHECK-LABEL: xvabsd_b_cmp:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.b $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.b $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %cmp = icmp slt <32 x i8> %a, %b
   %ab = sub <32 x i8> %a, %b
@@ -318,9 +271,7 @@ define <32 x i8> @xvabsd_b_cmp(<32 x i8> %a, <32 x i8> %b) nounwind {
 define <16 x i16> @xvabsd_h_cmp(<16 x i16> %a, <16 x i16> %b) nounwind {
 ; CHECK-LABEL: xvabsd_h_cmp:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.h $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.h $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %cmp = icmp slt <16 x i16> %a, %b
   %ab = sub <16 x i16> %a, %b
@@ -332,9 +283,7 @@ define <16 x i16> @xvabsd_h_cmp(<16 x i16> %a, <16 x i16> %b) nounwind {
 define <8 x i32> @xvabsd_w_cmp(<8 x i32> %a, <8 x i32> %b) nounwind {
 ; CHECK-LABEL: xvabsd_w_cmp:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.w $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.w $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %cmp = icmp slt <8 x i32> %a, %b
   %ab = sub <8 x i32> %a, %b
@@ -346,9 +295,7 @@ define <8 x i32> @xvabsd_w_cmp(<8 x i32> %a, <8 x i32> %b) nounwind {
 define <4 x i64> @xvabsd_d_cmp(<4 x i64> %a, <4 x i64> %b) nounwind {
 ; CHECK-LABEL: xvabsd_d_cmp:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.d $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.d $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %cmp = icmp slt <4 x i64> %a, %b
   %ab = sub <4 x i64> %a, %b
@@ -360,9 +307,7 @@ define <4 x i64> @xvabsd_d_cmp(<4 x i64> %a, <4 x i64> %b) nounwind {
 define <32 x i8> @xvabsd_bu_cmp(<32 x i8> %a, <32 x i8> %b) nounwind {
 ; CHECK-LABEL: xvabsd_bu_cmp:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.bu $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.bu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %cmp = icmp ult <32 x i8> %a, %b
   %ab = sub <32 x i8> %a, %b
@@ -374,9 +319,7 @@ define <32 x i8> @xvabsd_bu_cmp(<32 x i8> %a, <32 x i8> %b) nounwind {
 define <16 x i16> @xvabsd_hu_cmp(<16 x i16> %a, <16 x i16> %b) nounwind {
 ; CHECK-LABEL: xvabsd_hu_cmp:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.hu $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.hu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %cmp = icmp ult <16 x i16> %a, %b
   %ab = sub <16 x i16> %a, %b
@@ -388,9 +331,7 @@ define <16 x i16> @xvabsd_hu_cmp(<16 x i16> %a, <16 x i16> %b) nounwind {
 define <8 x i32> @xvabsd_wu_cmp(<8 x i32> %a, <8 x i32> %b) nounwind {
 ; CHECK-LABEL: xvabsd_wu_cmp:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.wu $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.wu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %cmp = icmp ult <8 x i32> %a, %b
   %ab = sub <8 x i32> %a, %b
@@ -402,9 +343,7 @@ define <8 x i32> @xvabsd_wu_cmp(<8 x i32> %a, <8 x i32> %b) nounwind {
 define <4 x i64> @xvabsd_du_cmp(<4 x i64> %a, <4 x i64> %b) nounwind {
 ; CHECK-LABEL: xvabsd_du_cmp:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.du $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.du $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %cmp = icmp ult <4 x i64> %a, %b
   %ab = sub <4 x i64> %a, %b
@@ -417,9 +356,7 @@ define <4 x i64> @xvabsd_du_cmp(<4 x i64> %a, <4 x i64> %b) nounwind {
 define <32 x i8> @xvabsd_b_select(<32 x i8> %a, <32 x i8> %b) nounwind {
 ; CHECK-LABEL: xvabsd_b_select:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.b $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.b $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %cmp = icmp slt <32 x i8> %a, %b
   %ab = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
@@ -431,9 +368,7 @@ define <32 x i8> @xvabsd_b_select(<32 x i8> %a, <32 x i8> %b) nounwind {
 define <16 x i16> @xvabsd_h_select(<16 x i16> %a, <16 x i16> %b) nounwind {
 ; CHECK-LABEL: xvabsd_h_select:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.h $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.h $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %cmp = icmp sle <16 x i16> %a, %b
   %ab = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
@@ -445,9 +380,7 @@ define <16 x i16> @xvabsd_h_select(<16 x i16> %a, <16 x i16> %b) nounwind {
 define <8 x i32> @xvabsd_w_select(<8 x i32> %a, <8 x i32> %b) nounwind {
 ; CHECK-LABEL: xvabsd_w_select:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.w $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.w $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %cmp = icmp sgt <8 x i32> %a, %b
   %ab = select <8 x i1> %cmp, <8 x i32> %a, <8 x i32> %b
@@ -459,9 +392,7 @@ define <8 x i32> @xvabsd_w_select(<8 x i32> %a, <8 x i32> %b) nounwind {
 define <4 x i64> @xvabsd_d_select(<4 x i64> %a, <4 x i64> %b) nounwind {
 ; CHECK-LABEL: xvabsd_d_select:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.d $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.d $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %cmp = icmp sge <4 x i64> %a, %b
   %ab = select <4 x i1> %cmp, <4 x i64> %a, <4 x i64> %b
@@ -473,9 +404,7 @@ define <4 x i64> @xvabsd_d_select(<4 x i64> %a, <4 x i64> %b) nounwind {
 define <32 x i8> @xvabsd_bu_select(<32 x i8> %a, <32 x i8> %b) nounwind {
 ; CHECK-LABEL: xvabsd_bu_select:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvmin.bu $xr2, $xr0, $xr1
-; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvabsd.bu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %cmp = icmp ult <32 x i8> %a, %b
   %ab = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
@@ -487,9 +416,7 @@ define <32 x i8> @xvabsd_bu_select(<32 x i8> %a, <32 x i8> %b) nounwind {
 define <16 x i16> @xvabsd_...
[truncated]

``````````
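
For reference, here is a minimal sketch (not part of the patch) of the kind of IR that should now select to a single absolute-difference instruction with these changes applied, mirroring the `sub(smax(a,b),smin(a,b))` pattern exercised by the updated lsx/absd.ll test; the function name and exact CHECK lines below are illustrative assumptions, not copied from the truncated portion of the diff:

```llvm
; Assumed RUN line, matching the style of the existing tests:
;   llc -mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s
define <16 x i8> @vabsd_b_example(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: vabsd_b_example:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vabsd.b $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
  ; sub(smax(a,b), smin(a,b)) is combined to ISD::ABDS, which is now
  ; legal for v16i8 under LSX and selects to vabsd.b via PatVrVr.
  %max = tail call <16 x i8> @llvm.smax.v16i8(<16 x i8> %a, <16 x i8> %b)
  %min = tail call <16 x i8> @llvm.smin.v16i8(<16 x i8> %a, <16 x i8> %b)
  %sub = sub <16 x i8> %max, %min
  ret <16 x i8> %sub
}

declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>)
declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>)
```

The unsigned variants follow the same shape with `umax`/`umin` (or `zext`-based widening) and select to `vabsd.bu`/`xvabsd.*u` through the PatVrVrU/PatXrXrU patterns added above.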



https://github.com/llvm/llvm-project/pull/134190

