[llvm] [LoongArch] Pre-commit tests for absolute difference (PR #132898)

via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 26 01:14:51 PDT 2025


https://github.com/tangaac updated https://github.com/llvm/llvm-project/pull/132898

From 4ab63d965d3801e9ccf44d07ac146d69d8f89f48 Mon Sep 17 00:00:00 2001
From: tangaac <tangyan01 at loongson.cn>
Date: Tue, 25 Mar 2025 16:49:07 +0800
Subject: [PATCH 1/5] Pre-commit tests for absolute difference

---
 .../LoongArch/lasx/ir-instruction/absd.ll     | 326 ++++++++++++++++++
 .../LoongArch/lsx/ir-instruction/absd.ll      | 326 ++++++++++++++++++
 2 files changed, 652 insertions(+)
 create mode 100644 llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
 create mode 100644 llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll
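
For context, the absolute-difference idiom these tests pin down can be reduced to a minimal standalone sketch (hypothetical reproducer, not part of the patch); with -mattr=+lsx it currently lowers to vmin.b/vmax.b/vsub.b rather than a single vabsd.b, exactly as the CHECK lines below record:

    define <16 x i8> @absd_sketch(<16 x i8> %a, <16 x i8> %b) {
      ; sub(smax(a,b), smin(a,b)) is equivalent to the signed absolute difference
      %max = call <16 x i8> @llvm.smax.v16i8(<16 x i8> %a, <16 x i8> %b)
      %min = call <16 x i8> @llvm.smin.v16i8(<16 x i8> %a, <16 x i8> %b)
      %sub = sub <16 x i8> %max, %min
      ret <16 x i8> %sub
    }
    declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>)
    declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>)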

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
new file mode 100644
index 0000000000000..42152bc7f5fcf
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
@@ -0,0 +1,326 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s
+
+;; Mostly copy from AArch64/neon-abd.ll
+
+;
+; VABDS_[B/H/W/D]
+;
+define <32 x i8> @xvabsd_b(<32 x i8> %a, <32 x i8> %b) #0 {
+; CHECK-LABEL: xvabsd_b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.b $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a.sext = sext <32 x i8> %a to <32 x i16>
+  %b.sext = sext <32 x i8> %b to <32 x i16>
+  %sub = sub <32 x i16> %a.sext, %b.sext
+  %abs = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %sub, i1 true)
+  %trunc = trunc <32 x i16> %abs to <32 x i8>
+  ret <32 x i8> %trunc
+}
+
+define <16 x i16> @xvabsd_h(<16 x i16> %a, <16 x i16> %b) #0 {
+; CHECK-LABEL: xvabsd_h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.h $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a.sext = sext <16 x i16> %a to <16 x i32>
+  %b.sext = sext <16 x i16> %b to <16 x i32>
+  %sub = sub <16 x i32> %a.sext, %b.sext
+  %abs = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %sub, i1 true)
+  %trunc = trunc <16 x i32> %abs to <16 x i16>
+  ret <16 x i16> %trunc
+}
+
+define <8 x i32> @xvabsd_w(<8 x i32> %a, <8 x i32> %b) #0 {
+; CHECK-LABEL: xvabsd_w:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.w $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a.sext = sext <8 x i32> %a to <8 x i64>
+  %b.sext = sext <8 x i32> %b to <8 x i64>
+  %sub = sub <8 x i64> %a.sext, %b.sext
+  %abs = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %sub, i1 true)
+  %trunc = trunc <8 x i64> %abs to <8 x i32>
+  ret <8 x i32> %trunc
+}
+
+define <4 x i64> @xvabsd_d(<4 x i64> %a, <4 x i64> %b) #0 {
+; CHECK-LABEL: xvabsd_d:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.d $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a.sext = sext <4 x i64> %a to <4 x i128>
+  %b.sext = sext <4 x i64> %b to <4 x i128>
+  %sub = sub <4 x i128> %a.sext, %b.sext
+  %abs = call <4 x i128> @llvm.abs.v4i128(<4 x i128> %sub, i1 true)
+  %trunc = trunc <4 x i128> %abs to <4 x i64>
+  ret <4 x i64> %trunc
+}
+
+;
+; xvabsd_[B/H/W/D]U
+;
+
+define <32 x i8> @xvabsd_bu(<32 x i8> %a, <32 x i8> %b) #0 {
+; CHECK-LABEL: xvabsd_bu:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.bu $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a.zext = zext <32 x i8> %a to <32 x i16>
+  %b.zext = zext <32 x i8> %b to <32 x i16>
+  %sub = sub <32 x i16> %a.zext, %b.zext
+  %abs = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %sub, i1 true)
+  %trunc = trunc <32 x i16> %abs to <32 x i8>
+  ret <32 x i8> %trunc
+}
+
+define <16 x i16> @xvabsd_hu(<16 x i16> %a, <16 x i16> %b) #0 {
+; CHECK-LABEL: xvabsd_hu:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.hu $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a.zext = zext <16 x i16> %a to <16 x i32>
+  %b.zext = zext <16 x i16> %b to <16 x i32>
+  %sub = sub <16 x i32> %a.zext, %b.zext
+  %abs = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %sub, i1 true)
+  %trunc = trunc <16 x i32> %abs to <16 x i16>
+  ret <16 x i16> %trunc
+}
+
+define <8 x i32> @xvabsd_wu(<8 x i32> %a, <8 x i32> %b) #0 {
+; CHECK-LABEL: xvabsd_wu:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.wu $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a.zext = zext <8 x i32> %a to <8 x i64>
+  %b.zext = zext <8 x i32> %b to <8 x i64>
+  %sub = sub <8 x i64> %a.zext, %b.zext
+  %abs = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %sub, i1 true)
+  %trunc = trunc <8 x i64> %abs to <8 x i32>
+  ret <8 x i32> %trunc
+}
+
+define <4 x i64> @xvabsd_du(<4 x i64> %a, <4 x i64> %b) #0 {
+; CHECK-LABEL: xvabsd_du:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.du $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a.zext = zext <4 x i64> %a to <4 x i128>
+  %b.zext = zext <4 x i64> %b to <4 x i128>
+  %sub = sub <4 x i128> %a.zext, %b.zext
+  %abs = call <4 x i128> @llvm.abs.v4i128(<4 x i128> %sub, i1 true)
+  %trunc = trunc <4 x i128> %abs to <4 x i64>
+  ret <4 x i64> %trunc
+}
+
+define <32 x i8> @xvabsd_v32i8_nsw(<32 x i8> %a, <32 x i8> %b) #0 {
+; CHECK-LABEL: xvabsd_v32i8_nsw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvneg.b $xr1, $xr0
+; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+  %sub = sub nsw <32 x i8> %a, %b
+  %abs = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %sub, i1 true)
+  ret <32 x i8> %abs
+}
+
+define <16 x i16> @xvabsd_v16i16_nsw(<16 x i16> %a, <16 x i16> %b) #0 {
+; CHECK-LABEL: xvabsd_v16i16_nsw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvneg.h $xr1, $xr0
+; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+  %sub = sub nsw <16 x i16> %a, %b
+  %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+  ret <16 x i16> %abs
+}
+
+define <8 x i32> @xvabsd_v8i32_nsw(<8 x i32> %a, <8 x i32> %b) #0 {
+; CHECK-LABEL: xvabsd_v8i32_nsw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvneg.w $xr1, $xr0
+; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+  %sub = sub nsw <8 x i32> %a, %b
+  %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+  ret <8 x i32> %abs
+}
+
+define <4 x i64> @xvabsd_v4i64_nsw(<4 x i64> %a, <4 x i64> %b) #0 {
+; CHECK-LABEL: xvabsd_v4i64_nsw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvneg.d $xr1, $xr0
+; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+  %sub = sub nsw <4 x i64> %a, %b
+  %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+  ret <4 x i64> %abs
+}
+
+define <32 x i8> @smaxmin_v32i8(<32 x i8> %0, <32 x i8> %1) {
+; CHECK-LABEL: smaxmin_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.b $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a = tail call <32 x i8> @llvm.smax.v32i8(<32 x i8> %0, <32 x i8> %1)
+  %b = tail call <32 x i8> @llvm.smin.v32i8(<32 x i8> %0, <32 x i8> %1)
+  %sub = sub <32 x i8> %a, %b
+  ret <32 x i8> %sub
+}
+
+define <16 x i16> @smaxmin_v16i16(<16 x i16> %0, <16 x i16> %1) {
+; CHECK-LABEL: smaxmin_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.h $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> %0, <16 x i16> %1)
+  %b = tail call <16 x i16> @llvm.smin.v16i16(<16 x i16> %0, <16 x i16> %1)
+  %sub = sub <16 x i16> %a, %b
+  ret <16 x i16> %sub
+}
+
+define <8 x i32> @smaxmin_v8i32(<8 x i32> %0, <8 x i32> %1) {
+; CHECK-LABEL: smaxmin_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.w $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a = tail call <8 x i32> @llvm.smax.v8i32(<8 x i32> %0, <8 x i32> %1)
+  %b = tail call <8 x i32> @llvm.smin.v8i32(<8 x i32> %0, <8 x i32> %1)
+  %sub = sub <8 x i32> %a, %b
+  ret <8 x i32> %sub
+}
+
+define <4 x i64> @smaxmin_v4i64(<4 x i64> %0, <4 x i64> %1) {
+; CHECK-LABEL: smaxmin_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.d $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a = tail call <4 x i64> @llvm.smax.v4i64(<4 x i64> %0, <4 x i64> %1)
+  %b = tail call <4 x i64> @llvm.smin.v4i64(<4 x i64> %0, <4 x i64> %1)
+  %sub = sub <4 x i64> %a, %b
+  ret <4 x i64> %sub
+}
+
+define <32 x i8> @umaxmin_v32i8(<32 x i8> %0, <32 x i8> %1) {
+; CHECK-LABEL: umaxmin_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.bu $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a = tail call <32 x i8> @llvm.umax.v32i8(<32 x i8> %0, <32 x i8> %1)
+  %b = tail call <32 x i8> @llvm.umin.v32i8(<32 x i8> %0, <32 x i8> %1)
+  %sub = sub <32 x i8> %a, %b
+  ret <32 x i8> %sub
+}
+
+define <16 x i16> @umaxmin_v16i16(<16 x i16> %0, <16 x i16> %1) {
+; CHECK-LABEL: umaxmin_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.hu $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a = tail call <16 x i16> @llvm.umax.v16i16(<16 x i16> %0, <16 x i16> %1)
+  %b = tail call <16 x i16> @llvm.umin.v16i16(<16 x i16> %0, <16 x i16> %1)
+  %sub = sub <16 x i16> %a, %b
+  ret <16 x i16> %sub
+}
+
+define <8 x i32> @umaxmin_v8i32(<8 x i32> %0, <8 x i32> %1) {
+; CHECK-LABEL: umaxmin_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.wu $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a = tail call <8 x i32> @llvm.umax.v8i32(<8 x i32> %0, <8 x i32> %1)
+  %b = tail call <8 x i32> @llvm.umin.v8i32(<8 x i32> %0, <8 x i32> %1)
+  %sub = sub <8 x i32> %a, %b
+  ret <8 x i32> %sub
+}
+
+define <4 x i64> @umaxmin_v4i64(<4 x i64> %0, <4 x i64> %1) {
+; CHECK-LABEL: umaxmin_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.du $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a = tail call <4 x i64> @llvm.umax.v4i64(<4 x i64> %0, <4 x i64> %1)
+  %b = tail call <4 x i64> @llvm.umin.v4i64(<4 x i64> %0, <4 x i64> %1)
+  %sub = sub <4 x i64> %a, %b
+  ret <4 x i64> %sub
+}
+
+define <32 x i8> @umaxmin_v32i8_com1(<32 x i8> %0, <32 x i8> %1) {
+; CHECK-LABEL: umaxmin_v32i8_com1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.bu $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %a = tail call <32 x i8> @llvm.umax.v32i8(<32 x i8> %0, <32 x i8> %1)
+  %b = tail call <32 x i8> @llvm.umin.v32i8(<32 x i8> %1, <32 x i8> %0)
+  %sub = sub <32 x i8> %a, %b
+  ret <32 x i8> %sub
+}
+
+declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1)
+
+declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
+declare <32 x i16> @llvm.abs.v32i16(<32 x i16>, i1)
+
+declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
+declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1)
+
+declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
+declare <8 x i64> @llvm.abs.v8i64(<8 x i64>, i1)
+
+declare <4 x i128> @llvm.abs.v4i128(<4 x i128>, i1)
+
+declare <32 x i8> @llvm.smax.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.smax.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
+declare <32 x i8> @llvm.smin.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.smin.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
+declare <32 x i8> @llvm.umax.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.umax.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.umax.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.umax.v4i64(<4 x i64>, <4 x i64>)
+declare <32 x i8> @llvm.umin.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.umin.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll
new file mode 100644
index 0000000000000..3ecfaed8e1505
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll
@@ -0,0 +1,326 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s
+
+;; Mostly copy from AArch64/neon-abd.ll
+
+;
+; VABDS_[B/H/W/D]
+;
+define <16 x i8> @vabsd_b(<16 x i8> %a, <16 x i8> %b) #0 {
+; CHECK-LABEL: vabsd_b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.b $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.b $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a.sext = sext <16 x i8> %a to <16 x i16>
+  %b.sext = sext <16 x i8> %b to <16 x i16>
+  %sub = sub <16 x i16> %a.sext, %b.sext
+  %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+  %trunc = trunc <16 x i16> %abs to <16 x i8>
+  ret <16 x i8> %trunc
+}
+
+define <8 x i16> @vabsd_h(<8 x i16> %a, <8 x i16> %b) #0 {
+; CHECK-LABEL: vabsd_h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.h $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.h $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a.sext = sext <8 x i16> %a to <8 x i32>
+  %b.sext = sext <8 x i16> %b to <8 x i32>
+  %sub = sub <8 x i32> %a.sext, %b.sext
+  %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+  %trunc = trunc <8 x i32> %abs to <8 x i16>
+  ret <8 x i16> %trunc
+}
+
+define <4 x i32> @vabsd_w(<4 x i32> %a, <4 x i32> %b) #0 {
+; CHECK-LABEL: vabsd_w:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.w $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.w $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a.sext = sext <4 x i32> %a to <4 x i64>
+  %b.sext = sext <4 x i32> %b to <4 x i64>
+  %sub = sub <4 x i64> %a.sext, %b.sext
+  %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+  %trunc = trunc <4 x i64> %abs to <4 x i32>
+  ret <4 x i32> %trunc
+}
+
+define <2 x i64> @vabsd_d(<2 x i64> %a, <2 x i64> %b) #0 {
+; CHECK-LABEL: vabsd_d:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.d $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.d $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a.sext = sext <2 x i64> %a to <2 x i128>
+  %b.sext = sext <2 x i64> %b to <2 x i128>
+  %sub = sub <2 x i128> %a.sext, %b.sext
+  %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
+  %trunc = trunc <2 x i128> %abs to <2 x i64>
+  ret <2 x i64> %trunc
+}
+
+;
+; VABSD_[B/H/W/D]U
+;
+
+define <16 x i8> @vabsd_bu(<16 x i8> %a, <16 x i8> %b) #0 {
+; CHECK-LABEL: vabsd_bu:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.bu $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.b $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a.zext = zext <16 x i8> %a to <16 x i16>
+  %b.zext = zext <16 x i8> %b to <16 x i16>
+  %sub = sub <16 x i16> %a.zext, %b.zext
+  %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+  %trunc = trunc <16 x i16> %abs to <16 x i8>
+  ret <16 x i8> %trunc
+}
+
+define <8 x i16> @vabsd_hu(<8 x i16> %a, <8 x i16> %b) #0 {
+; CHECK-LABEL: vabsd_hu:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.hu $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.h $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a.zext = zext <8 x i16> %a to <8 x i32>
+  %b.zext = zext <8 x i16> %b to <8 x i32>
+  %sub = sub <8 x i32> %a.zext, %b.zext
+  %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+  %trunc = trunc <8 x i32> %abs to <8 x i16>
+  ret <8 x i16> %trunc
+}
+
+define <4 x i32> @vabsd_wu(<4 x i32> %a, <4 x i32> %b) #0 {
+; CHECK-LABEL: vabsd_wu:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.wu $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.w $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a.zext = zext <4 x i32> %a to <4 x i64>
+  %b.zext = zext <4 x i32> %b to <4 x i64>
+  %sub = sub <4 x i64> %a.zext, %b.zext
+  %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+  %trunc = trunc <4 x i64> %abs to <4 x i32>
+  ret <4 x i32> %trunc
+}
+
+define <2 x i64> @vabsd_du(<2 x i64> %a, <2 x i64> %b) #0 {
+; CHECK-LABEL: vabsd_du:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.du $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.d $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a.zext = zext <2 x i64> %a to <2 x i128>
+  %b.zext = zext <2 x i64> %b to <2 x i128>
+  %sub = sub <2 x i128> %a.zext, %b.zext
+  %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
+  %trunc = trunc <2 x i128> %abs to <2 x i64>
+  ret <2 x i64> %trunc
+}
+
+define <16 x i8> @vabsd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) #0 {
+; CHECK-LABEL: vabsd_v16i8_nsw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsub.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vneg.b $vr1, $vr0
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+  %sub = sub nsw <16 x i8> %a, %b
+  %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
+  ret <16 x i8> %abs
+}
+
+define <8 x i16> @vabsd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) #0 {
+; CHECK-LABEL: vabsd_v8i16_nsw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsub.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vneg.h $vr1, $vr0
+; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+  %sub = sub nsw <8 x i16> %a, %b
+  %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+  ret <8 x i16> %abs
+}
+
+define <4 x i32> @vabsd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) #0 {
+; CHECK-LABEL: vabsd_v4i32_nsw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsub.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vneg.w $vr1, $vr0
+; CHECK-NEXT:    vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+  %sub = sub nsw <4 x i32> %a, %b
+  %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+  ret <4 x i32> %abs
+}
+
+define <2 x i64> @vabsd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) #0 {
+; CHECK-LABEL: vabsd_v2i64_nsw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsub.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    vneg.d $vr1, $vr0
+; CHECK-NEXT:    vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+  %sub = sub nsw <2 x i64> %a, %b
+  %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+  ret <2 x i64> %abs
+}
+
+define <16 x i8> @smaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
+; CHECK-LABEL: smaxmin_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.b $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.b $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a = tail call <16 x i8> @llvm.smax.v16i8(<16 x i8> %0, <16 x i8> %1)
+  %b = tail call <16 x i8> @llvm.smin.v16i8(<16 x i8> %0, <16 x i8> %1)
+  %sub = sub <16 x i8> %a, %b
+  ret <16 x i8> %sub
+}
+
+define <8 x i16> @smaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
+; CHECK-LABEL: smaxmin_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.h $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.h $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a = tail call <8 x i16> @llvm.smax.v8i16(<8 x i16> %0, <8 x i16> %1)
+  %b = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> %0, <8 x i16> %1)
+  %sub = sub <8 x i16> %a, %b
+  ret <8 x i16> %sub
+}
+
+define <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
+; CHECK-LABEL: smaxmin_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.w $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.w $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %0, <4 x i32> %1)
+  %b = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %0, <4 x i32> %1)
+  %sub = sub <4 x i32> %a, %b
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
+; CHECK-LABEL: smaxmin_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.d $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.d $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1)
+  %b = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %0, <2 x i64> %1)
+  %sub = sub <2 x i64> %a, %b
+  ret <2 x i64> %sub
+}
+
+define <16 x i8> @umaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
+; CHECK-LABEL: umaxmin_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.bu $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.b $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
+  %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %0, <16 x i8> %1)
+  %sub = sub <16 x i8> %a, %b
+  ret <16 x i8> %sub
+}
+
+define <8 x i16> @umaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
+; CHECK-LABEL: umaxmin_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.hu $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.h $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a = tail call <8 x i16> @llvm.umax.v8i16(<8 x i16> %0, <8 x i16> %1)
+  %b = tail call <8 x i16> @llvm.umin.v8i16(<8 x i16> %0, <8 x i16> %1)
+  %sub = sub <8 x i16> %a, %b
+  ret <8 x i16> %sub
+}
+
+define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
+; CHECK-LABEL: umaxmin_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.wu $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.w $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a = tail call <4 x i32> @llvm.umax.v4i32(<4 x i32> %0, <4 x i32> %1)
+  %b = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %0, <4 x i32> %1)
+  %sub = sub <4 x i32> %a, %b
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
+; CHECK-LABEL: umaxmin_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.du $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.d $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a = tail call <2 x i64> @llvm.umax.v2i64(<2 x i64> %0, <2 x i64> %1)
+  %b = tail call <2 x i64> @llvm.umin.v2i64(<2 x i64> %0, <2 x i64> %1)
+  %sub = sub <2 x i64> %a, %b
+  ret <2 x i64> %sub
+}
+
+define <16 x i8> @umaxmin_v16i8_com1(<16 x i8> %0, <16 x i8> %1) {
+; CHECK-LABEL: umaxmin_v16i8_com1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.bu $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.b $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
+  %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %1, <16 x i8> %0)
+  %sub = sub <16 x i8> %a, %b
+  ret <16 x i8> %sub
+}
+
+declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1)
+
+declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1)
+declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
+
+declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)
+declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
+
+declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)
+declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
+
+declare <2 x i128> @llvm.abs.v2i128(<2 x i128>, i1)
+
+declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
+declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
+declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>)
+declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)

From ed0839ca88c454c8d427272b771db9908c5bb771 Mon Sep 17 00:00:00 2001
From: tangaac <tangyan01 at loongson.cn>
Date: Tue, 25 Mar 2025 17:04:02 +0800
Subject: [PATCH 2/5] Fix comment wording and header casing

---
 llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll | 6 +++---
 llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll  | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
index 42152bc7f5fcf..882c69c8e9b1b 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s
 
-;; Mostly copy from AArch64/neon-abd.ll
+;; Mostly copied from AArch64/neon-abd.ll
 
 ;
-; VABDS_[B/H/W/D]
+; XVABDS_[B/H/W/D]
 ;
 define <32 x i8> @xvabsd_b(<32 x i8> %a, <32 x i8> %b) #0 {
 ; CHECK-LABEL: xvabsd_b:
@@ -67,7 +67,7 @@ define <4 x i64> @xvabsd_d(<4 x i64> %a, <4 x i64> %b) #0 {
 }
 
 ;
-; xvabsd_[B/H/W/D]U
+; XVABSD_[B/H/W/D]U
 ;
 
 define <32 x i8> @xvabsd_bu(<32 x i8> %a, <32 x i8> %b) #0 {
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll
index 3ecfaed8e1505..db552ba962a1f 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s
 
-;; Mostly copy from AArch64/neon-abd.ll
+;; Mostly copied from AArch64/neon-abd.ll
 
 ;
 ; VABDS_[B/H/W/D]

From e47a54667b16b7c2bdcb400de51bda681f1af79a Mon Sep 17 00:00:00 2001
From: tangaac <tangyan01 at loongson.cn>
Date: Tue, 25 Mar 2025 17:14:00 +0800
Subject: [PATCH 3/5] Remove unnecessary attribute markers

---
 .../LoongArch/lasx/ir-instruction/absd.ll     | 24 +++++++++----------
 .../LoongArch/lsx/ir-instruction/absd.ll      | 24 +++++++++----------
 2 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
index 882c69c8e9b1b..2da6b35b470e9 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
@@ -6,7 +6,7 @@
 ;
 ; XVABDS_[B/H/W/D]
 ;
-define <32 x i8> @xvabsd_b(<32 x i8> %a, <32 x i8> %b) #0 {
+define <32 x i8> @xvabsd_b(<32 x i8> %a, <32 x i8> %b) {
 ; CHECK-LABEL: xvabsd_b:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.b $xr2, $xr0, $xr1
@@ -21,7 +21,7 @@ define <32 x i8> @xvabsd_b(<32 x i8> %a, <32 x i8> %b) #0 {
   ret <32 x i8> %trunc
 }
 
-define <16 x i16> @xvabsd_h(<16 x i16> %a, <16 x i16> %b) #0 {
+define <16 x i16> @xvabsd_h(<16 x i16> %a, <16 x i16> %b) {
 ; CHECK-LABEL: xvabsd_h:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.h $xr2, $xr0, $xr1
@@ -36,7 +36,7 @@ define <16 x i16> @xvabsd_h(<16 x i16> %a, <16 x i16> %b) #0 {
   ret <16 x i16> %trunc
 }
 
-define <8 x i32> @xvabsd_w(<8 x i32> %a, <8 x i32> %b) #0 {
+define <8 x i32> @xvabsd_w(<8 x i32> %a, <8 x i32> %b) {
 ; CHECK-LABEL: xvabsd_w:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.w $xr2, $xr0, $xr1
@@ -51,7 +51,7 @@ define <8 x i32> @xvabsd_w(<8 x i32> %a, <8 x i32> %b) #0 {
   ret <8 x i32> %trunc
 }
 
-define <4 x i64> @xvabsd_d(<4 x i64> %a, <4 x i64> %b) #0 {
+define <4 x i64> @xvabsd_d(<4 x i64> %a, <4 x i64> %b) {
 ; CHECK-LABEL: xvabsd_d:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.d $xr2, $xr0, $xr1
@@ -70,7 +70,7 @@ define <4 x i64> @xvabsd_d(<4 x i64> %a, <4 x i64> %b) #0 {
 ; XVABSD_[B/H/W/D]U
 ;
 
-define <32 x i8> @xvabsd_bu(<32 x i8> %a, <32 x i8> %b) #0 {
+define <32 x i8> @xvabsd_bu(<32 x i8> %a, <32 x i8> %b) {
 ; CHECK-LABEL: xvabsd_bu:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.bu $xr2, $xr0, $xr1
@@ -85,7 +85,7 @@ define <32 x i8> @xvabsd_bu(<32 x i8> %a, <32 x i8> %b) #0 {
   ret <32 x i8> %trunc
 }
 
-define <16 x i16> @xvabsd_hu(<16 x i16> %a, <16 x i16> %b) #0 {
+define <16 x i16> @xvabsd_hu(<16 x i16> %a, <16 x i16> %b) {
 ; CHECK-LABEL: xvabsd_hu:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.hu $xr2, $xr0, $xr1
@@ -100,7 +100,7 @@ define <16 x i16> @xvabsd_hu(<16 x i16> %a, <16 x i16> %b) #0 {
   ret <16 x i16> %trunc
 }
 
-define <8 x i32> @xvabsd_wu(<8 x i32> %a, <8 x i32> %b) #0 {
+define <8 x i32> @xvabsd_wu(<8 x i32> %a, <8 x i32> %b) {
 ; CHECK-LABEL: xvabsd_wu:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.wu $xr2, $xr0, $xr1
@@ -115,7 +115,7 @@ define <8 x i32> @xvabsd_wu(<8 x i32> %a, <8 x i32> %b) #0 {
   ret <8 x i32> %trunc
 }
 
-define <4 x i64> @xvabsd_du(<4 x i64> %a, <4 x i64> %b) #0 {
+define <4 x i64> @xvabsd_du(<4 x i64> %a, <4 x i64> %b) {
 ; CHECK-LABEL: xvabsd_du:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.du $xr2, $xr0, $xr1
@@ -130,7 +130,7 @@ define <4 x i64> @xvabsd_du(<4 x i64> %a, <4 x i64> %b) #0 {
   ret <4 x i64> %trunc
 }
 
-define <32 x i8> @xvabsd_v32i8_nsw(<32 x i8> %a, <32 x i8> %b) #0 {
+define <32 x i8> @xvabsd_v32i8_nsw(<32 x i8> %a, <32 x i8> %b) {
 ; CHECK-LABEL: xvabsd_v32i8_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr1
@@ -142,7 +142,7 @@ define <32 x i8> @xvabsd_v32i8_nsw(<32 x i8> %a, <32 x i8> %b) #0 {
   ret <32 x i8> %abs
 }
 
-define <16 x i16> @xvabsd_v16i16_nsw(<16 x i16> %a, <16 x i16> %b) #0 {
+define <16 x i16> @xvabsd_v16i16_nsw(<16 x i16> %a, <16 x i16> %b) {
 ; CHECK-LABEL: xvabsd_v16i16_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr1
@@ -154,7 +154,7 @@ define <16 x i16> @xvabsd_v16i16_nsw(<16 x i16> %a, <16 x i16> %b) #0 {
   ret <16 x i16> %abs
 }
 
-define <8 x i32> @xvabsd_v8i32_nsw(<8 x i32> %a, <8 x i32> %b) #0 {
+define <8 x i32> @xvabsd_v8i32_nsw(<8 x i32> %a, <8 x i32> %b) {
 ; CHECK-LABEL: xvabsd_v8i32_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr1
@@ -166,7 +166,7 @@ define <8 x i32> @xvabsd_v8i32_nsw(<8 x i32> %a, <8 x i32> %b) #0 {
   ret <8 x i32> %abs
 }
 
-define <4 x i64> @xvabsd_v4i64_nsw(<4 x i64> %a, <4 x i64> %b) #0 {
+define <4 x i64> @xvabsd_v4i64_nsw(<4 x i64> %a, <4 x i64> %b) {
 ; CHECK-LABEL: xvabsd_v4i64_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr1
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll
index db552ba962a1f..63c3f88ce6aa0 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll
@@ -6,7 +6,7 @@
 ;
 ; VABDS_[B/H/W/D]
 ;
-define <16 x i8> @vabsd_b(<16 x i8> %a, <16 x i8> %b) #0 {
+define <16 x i8> @vabsd_b(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-LABEL: vabsd_b:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.b $vr2, $vr0, $vr1
@@ -21,7 +21,7 @@ define <16 x i8> @vabsd_b(<16 x i8> %a, <16 x i8> %b) #0 {
   ret <16 x i8> %trunc
 }
 
-define <8 x i16> @vabsd_h(<8 x i16> %a, <8 x i16> %b) #0 {
+define <8 x i16> @vabsd_h(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-LABEL: vabsd_h:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.h $vr2, $vr0, $vr1
@@ -36,7 +36,7 @@ define <8 x i16> @vabsd_h(<8 x i16> %a, <8 x i16> %b) #0 {
   ret <8 x i16> %trunc
 }
 
-define <4 x i32> @vabsd_w(<4 x i32> %a, <4 x i32> %b) #0 {
+define <4 x i32> @vabsd_w(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: vabsd_w:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.w $vr2, $vr0, $vr1
@@ -51,7 +51,7 @@ define <4 x i32> @vabsd_w(<4 x i32> %a, <4 x i32> %b) #0 {
   ret <4 x i32> %trunc
 }
 
-define <2 x i64> @vabsd_d(<2 x i64> %a, <2 x i64> %b) #0 {
+define <2 x i64> @vabsd_d(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: vabsd_d:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.d $vr2, $vr0, $vr1
@@ -70,7 +70,7 @@ define <2 x i64> @vabsd_d(<2 x i64> %a, <2 x i64> %b) #0 {
 ; VABSD_[B/H/W/D]U
 ;
 
-define <16 x i8> @vabsd_bu(<16 x i8> %a, <16 x i8> %b) #0 {
+define <16 x i8> @vabsd_bu(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-LABEL: vabsd_bu:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.bu $vr2, $vr0, $vr1
@@ -85,7 +85,7 @@ define <16 x i8> @vabsd_bu(<16 x i8> %a, <16 x i8> %b) #0 {
   ret <16 x i8> %trunc
 }
 
-define <8 x i16> @vabsd_hu(<8 x i16> %a, <8 x i16> %b) #0 {
+define <8 x i16> @vabsd_hu(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-LABEL: vabsd_hu:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.hu $vr2, $vr0, $vr1
@@ -100,7 +100,7 @@ define <8 x i16> @vabsd_hu(<8 x i16> %a, <8 x i16> %b) #0 {
   ret <8 x i16> %trunc
 }
 
-define <4 x i32> @vabsd_wu(<4 x i32> %a, <4 x i32> %b) #0 {
+define <4 x i32> @vabsd_wu(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: vabsd_wu:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.wu $vr2, $vr0, $vr1
@@ -115,7 +115,7 @@ define <4 x i32> @vabsd_wu(<4 x i32> %a, <4 x i32> %b) #0 {
   ret <4 x i32> %trunc
 }
 
-define <2 x i64> @vabsd_du(<2 x i64> %a, <2 x i64> %b) #0 {
+define <2 x i64> @vabsd_du(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: vabsd_du:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.du $vr2, $vr0, $vr1
@@ -130,7 +130,7 @@ define <2 x i64> @vabsd_du(<2 x i64> %a, <2 x i64> %b) #0 {
   ret <2 x i64> %trunc
 }
 
-define <16 x i8> @vabsd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) #0 {
+define <16 x i8> @vabsd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-LABEL: vabsd_v16i8_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsub.b $vr0, $vr0, $vr1
@@ -142,7 +142,7 @@ define <16 x i8> @vabsd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) #0 {
   ret <16 x i8> %abs
 }
 
-define <8 x i16> @vabsd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) #0 {
+define <8 x i16> @vabsd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-LABEL: vabsd_v8i16_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsub.h $vr0, $vr0, $vr1
@@ -154,7 +154,7 @@ define <8 x i16> @vabsd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) #0 {
   ret <8 x i16> %abs
 }
 
-define <4 x i32> @vabsd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) #0 {
+define <4 x i32> @vabsd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: vabsd_v4i32_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsub.w $vr0, $vr0, $vr1
@@ -166,7 +166,7 @@ define <4 x i32> @vabsd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) #0 {
   ret <4 x i32> %abs
 }
 
-define <2 x i64> @vabsd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) #0 {
+define <2 x i64> @vabsd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: vabsd_v2i64_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsub.d $vr0, $vr0, $vr1

From ff8982e98bf446be97a41d10410d3bd5461d7efb Mon Sep 17 00:00:00 2001
From: tangaac <tangyan01 at loongson.cn>
Date: Wed, 26 Mar 2025 14:47:09 +0800
Subject: [PATCH 4/5] Update tests: rename functions, add icmp/select cases

---
 .../LoongArch/lasx/ir-instruction/absd.ll     | 407 ++++++++++++++----
 .../LoongArch/lsx/ir-instruction/absd.ll      | 299 +++++++++++--
 2 files changed, 584 insertions(+), 122 deletions(-)

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
index 2da6b35b470e9..a24a4165e00d5 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
@@ -1,11 +1,18 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s
 
-;; Mostly copied from AArch64/neon-abd.ll
+;; TODO: Currently LoongArch generates sub-optimal code for the following cases:
+;; 1. trunc(abs(sub(sext(a),sext(b)))) -> abds(a,b) or abdu(a,b)
+;; 2. abs(sub_nsw(x, y)) -> abds(a,b)
+;; 3. sub(smax(a,b),smin(a,b)) -> abds(a,b) or abdu(a,b)
+;; 4. select(icmp(a,b),sub(a,b),sub(b,a)) -> abds(a,b) or abdu(a,b)
+;; 5. sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abds(a,b) or abdu(a,b)
+;;
+;; abds / abdu can be lowered to the xvabsd.{b/h/w/d} / xvabsd.{b/h/w/d}u instructions.
+;;
+;; A later patch will address this.
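+;;
+;; A hypothetical sketch of case 1 (illustration only, not one of the tests
+;; below): for i8 elements the widened difference cannot wrap, so
+;;   %w = sub <32 x i16> (sext %a), (sext %b)
+;;   %r = trunc (abs %w) to <32 x i8>
+;; computes exactly abds(a, b), i.e. a single xvabsd.b.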
 
-;
-; XVABDS_[B/H/W/D]
-;
+;; trunc(abs(sub(sext(a),sext(b)))) -> abds(a,b)
 define <32 x i8> @xvabsd_b(<32 x i8> %a, <32 x i8> %b) {
 ; CHECK-LABEL: xvabsd_b:
 ; CHECK:       # %bb.0:
@@ -16,7 +23,7 @@ define <32 x i8> @xvabsd_b(<32 x i8> %a, <32 x i8> %b) {
   %a.sext = sext <32 x i8> %a to <32 x i16>
   %b.sext = sext <32 x i8> %b to <32 x i16>
   %sub = sub <32 x i16> %a.sext, %b.sext
-  %abs = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %sub, i1 true)
+  %abs = call <32 x i16> @llvm.abs.v16i16(<32 x i16> %sub, i1 true)
   %trunc = trunc <32 x i16> %abs to <32 x i8>
   ret <32 x i8> %trunc
 }
@@ -31,7 +38,7 @@ define <16 x i16> @xvabsd_h(<16 x i16> %a, <16 x i16> %b) {
   %a.sext = sext <16 x i16> %a to <16 x i32>
   %b.sext = sext <16 x i16> %b to <16 x i32>
   %sub = sub <16 x i32> %a.sext, %b.sext
-  %abs = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %sub, i1 true)
+  %abs = call <16 x i32> @llvm.abs.v8i32(<16 x i32> %sub, i1 true)
   %trunc = trunc <16 x i32> %abs to <16 x i16>
   ret <16 x i16> %trunc
 }
@@ -46,7 +53,7 @@ define <8 x i32> @xvabsd_w(<8 x i32> %a, <8 x i32> %b) {
   %a.sext = sext <8 x i32> %a to <8 x i64>
   %b.sext = sext <8 x i32> %b to <8 x i64>
   %sub = sub <8 x i64> %a.sext, %b.sext
-  %abs = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %sub, i1 true)
+  %abs = call <8 x i64> @llvm.abs.v4i64(<8 x i64> %sub, i1 true)
   %trunc = trunc <8 x i64> %abs to <8 x i32>
   ret <8 x i32> %trunc
 }
@@ -61,15 +68,11 @@ define <4 x i64> @xvabsd_d(<4 x i64> %a, <4 x i64> %b) {
   %a.sext = sext <4 x i64> %a to <4 x i128>
   %b.sext = sext <4 x i64> %b to <4 x i128>
   %sub = sub <4 x i128> %a.sext, %b.sext
-  %abs = call <4 x i128> @llvm.abs.v4i128(<4 x i128> %sub, i1 true)
+  %abs = call <4 x i128> @llvm.abs.v2i128(<4 x i128> %sub, i1 true)
   %trunc = trunc <4 x i128> %abs to <4 x i64>
   ret <4 x i64> %trunc
 }
 
-;
-; XVABSD_[B/H/W/D]U
-;
-
 define <32 x i8> @xvabsd_bu(<32 x i8> %a, <32 x i8> %b) {
 ; CHECK-LABEL: xvabsd_bu:
 ; CHECK:       # %bb.0:
@@ -80,7 +83,7 @@ define <32 x i8> @xvabsd_bu(<32 x i8> %a, <32 x i8> %b) {
   %a.zext = zext <32 x i8> %a to <32 x i16>
   %b.zext = zext <32 x i8> %b to <32 x i16>
   %sub = sub <32 x i16> %a.zext, %b.zext
-  %abs = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %sub, i1 true)
+  %abs = call <32 x i16> @llvm.abs.v16i16(<32 x i16> %sub, i1 true)
   %trunc = trunc <32 x i16> %abs to <32 x i8>
   ret <32 x i8> %trunc
 }
@@ -95,7 +98,7 @@ define <16 x i16> @xvabsd_hu(<16 x i16> %a, <16 x i16> %b) {
   %a.zext = zext <16 x i16> %a to <16 x i32>
   %b.zext = zext <16 x i16> %b to <16 x i32>
   %sub = sub <16 x i32> %a.zext, %b.zext
-  %abs = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %sub, i1 true)
+  %abs = call <16 x i32> @llvm.abs.v8i32(<16 x i32> %sub, i1 true)
   %trunc = trunc <16 x i32> %abs to <16 x i16>
   ret <16 x i16> %trunc
 }
@@ -110,7 +113,7 @@ define <8 x i32> @xvabsd_wu(<8 x i32> %a, <8 x i32> %b) {
   %a.zext = zext <8 x i32> %a to <8 x i64>
   %b.zext = zext <8 x i32> %b to <8 x i64>
   %sub = sub <8 x i64> %a.zext, %b.zext
-  %abs = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %sub, i1 true)
+  %abs = call <8 x i64> @llvm.abs.v4i64(<8 x i64> %sub, i1 true)
   %trunc = trunc <8 x i64> %abs to <8 x i32>
   ret <8 x i32> %trunc
 }
@@ -125,202 +128,430 @@ define <4 x i64> @xvabsd_du(<4 x i64> %a, <4 x i64> %b) {
   %a.zext = zext <4 x i64> %a to <4 x i128>
   %b.zext = zext <4 x i64> %b to <4 x i128>
   %sub = sub <4 x i128> %a.zext, %b.zext
-  %abs = call <4 x i128> @llvm.abs.v4i128(<4 x i128> %sub, i1 true)
+  %abs = call <4 x i128> @llvm.abs.v2i128(<4 x i128> %sub, i1 true)
   %trunc = trunc <4 x i128> %abs to <4 x i64>
   ret <4 x i64> %trunc
 }
 
-define <32 x i8> @xvabsd_v32i8_nsw(<32 x i8> %a, <32 x i8> %b) {
-; CHECK-LABEL: xvabsd_v32i8_nsw:
+;; abs(sub_nsw(x, y)) -> abds(a,b)
+define <32 x i8> @xvabsd_b_nsw(<32 x i8> %a, <32 x i8> %b) {
+; CHECK-LABEL: xvabsd_b_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvneg.b $xr1, $xr0
 ; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %sub = sub nsw <32 x i8> %a, %b
-  %abs = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %sub, i1 true)
+  %abs = call <32 x i8> @llvm.abs.v16i8(<32 x i8> %sub, i1 true)
   ret <32 x i8> %abs
 }
 
-define <16 x i16> @xvabsd_v16i16_nsw(<16 x i16> %a, <16 x i16> %b) {
-; CHECK-LABEL: xvabsd_v16i16_nsw:
+define <16 x i16> @xvabsd_h_nsw(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: xvabsd_h_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvneg.h $xr1, $xr0
 ; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %sub = sub nsw <16 x i16> %a, %b
-  %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+  %abs = call <16 x i16> @llvm.abs.v8i16(<16 x i16> %sub, i1 true)
   ret <16 x i16> %abs
 }
 
-define <8 x i32> @xvabsd_v8i32_nsw(<8 x i32> %a, <8 x i32> %b) {
-; CHECK-LABEL: xvabsd_v8i32_nsw:
+define <8 x i32> @xvabsd_w_nsw(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: xvabsd_w_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvneg.w $xr1, $xr0
 ; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %sub = sub nsw <8 x i32> %a, %b
-  %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+  %abs = call <8 x i32> @llvm.abs.v4i32(<8 x i32> %sub, i1 true)
   ret <8 x i32> %abs
 }
 
-define <4 x i64> @xvabsd_v4i64_nsw(<4 x i64> %a, <4 x i64> %b) {
-; CHECK-LABEL: xvabsd_v4i64_nsw:
+define <4 x i64> @xvabsd_d_nsw(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: xvabsd_d_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvneg.d $xr1, $xr0
 ; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %sub = sub nsw <4 x i64> %a, %b
-  %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+  %abs = call <4 x i64> @llvm.abs.v2i64(<4 x i64> %sub, i1 true)
   ret <4 x i64> %abs
 }
 
-define <32 x i8> @smaxmin_v32i8(<32 x i8> %0, <32 x i8> %1) {
-; CHECK-LABEL: smaxmin_v32i8:
+;; sub(smax(a,b),smin(a,b)) -> abds(a,b)
+define <32 x i8> @maxmin_b(<32 x i8> %0, <32 x i8> %1) {
+; CHECK-LABEL: maxmin_b:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.b $xr2, $xr0, $xr1
 ; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <32 x i8> @llvm.smax.v32i8(<32 x i8> %0, <32 x i8> %1)
-  %b = tail call <32 x i8> @llvm.smin.v32i8(<32 x i8> %0, <32 x i8> %1)
+  %a = tail call <32 x i8> @llvm.smax.v16i8(<32 x i8> %0, <32 x i8> %1)
+  %b = tail call <32 x i8> @llvm.smin.v16i8(<32 x i8> %0, <32 x i8> %1)
   %sub = sub <32 x i8> %a, %b
   ret <32 x i8> %sub
 }
 
-define <16 x i16> @smaxmin_v16i16(<16 x i16> %0, <16 x i16> %1) {
-; CHECK-LABEL: smaxmin_v16i16:
+define <16 x i16> @maxmin_h(<16 x i16> %0, <16 x i16> %1) {
+; CHECK-LABEL: maxmin_h:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.h $xr2, $xr0, $xr1
 ; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> %0, <16 x i16> %1)
-  %b = tail call <16 x i16> @llvm.smin.v16i16(<16 x i16> %0, <16 x i16> %1)
+  %a = tail call <16 x i16> @llvm.smax.v8i16(<16 x i16> %0, <16 x i16> %1)
+  %b = tail call <16 x i16> @llvm.smin.v8i16(<16 x i16> %0, <16 x i16> %1)
   %sub = sub <16 x i16> %a, %b
   ret <16 x i16> %sub
 }
 
-define <8 x i32> @smaxmin_v8i32(<8 x i32> %0, <8 x i32> %1) {
-; CHECK-LABEL: smaxmin_v8i32:
+define <8 x i32> @maxmin_w(<8 x i32> %0, <8 x i32> %1) {
+; CHECK-LABEL: maxmin_w:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.w $xr2, $xr0, $xr1
 ; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <8 x i32> @llvm.smax.v8i32(<8 x i32> %0, <8 x i32> %1)
-  %b = tail call <8 x i32> @llvm.smin.v8i32(<8 x i32> %0, <8 x i32> %1)
+  %a = tail call <8 x i32> @llvm.smax.v4i32(<8 x i32> %0, <8 x i32> %1)
+  %b = tail call <8 x i32> @llvm.smin.v4i32(<8 x i32> %0, <8 x i32> %1)
   %sub = sub <8 x i32> %a, %b
   ret <8 x i32> %sub
 }
 
-define <4 x i64> @smaxmin_v4i64(<4 x i64> %0, <4 x i64> %1) {
-; CHECK-LABEL: smaxmin_v4i64:
+define <4 x i64> @maxmin_d(<4 x i64> %0, <4 x i64> %1) {
+; CHECK-LABEL: maxmin_d:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.d $xr2, $xr0, $xr1
 ; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <4 x i64> @llvm.smax.v4i64(<4 x i64> %0, <4 x i64> %1)
-  %b = tail call <4 x i64> @llvm.smin.v4i64(<4 x i64> %0, <4 x i64> %1)
+  %a = tail call <4 x i64> @llvm.smax.v2i64(<4 x i64> %0, <4 x i64> %1)
+  %b = tail call <4 x i64> @llvm.smin.v2i64(<4 x i64> %0, <4 x i64> %1)
   %sub = sub <4 x i64> %a, %b
   ret <4 x i64> %sub
 }
 
-define <32 x i8> @umaxmin_v32i8(<32 x i8> %0, <32 x i8> %1) {
-; CHECK-LABEL: umaxmin_v32i8:
+define <32 x i8> @maxmin_bu(<32 x i8> %0, <32 x i8> %1) {
+; CHECK-LABEL: maxmin_bu:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.bu $xr2, $xr0, $xr1
 ; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <32 x i8> @llvm.umax.v32i8(<32 x i8> %0, <32 x i8> %1)
-  %b = tail call <32 x i8> @llvm.umin.v32i8(<32 x i8> %0, <32 x i8> %1)
+  %a = tail call <32 x i8> @llvm.umax.v16i8(<32 x i8> %0, <32 x i8> %1)
+  %b = tail call <32 x i8> @llvm.umin.v16i8(<32 x i8> %0, <32 x i8> %1)
   %sub = sub <32 x i8> %a, %b
   ret <32 x i8> %sub
 }
 
-define <16 x i16> @umaxmin_v16i16(<16 x i16> %0, <16 x i16> %1) {
-; CHECK-LABEL: umaxmin_v16i16:
+define <16 x i16> @maxmin_hu(<16 x i16> %0, <16 x i16> %1) {
+; CHECK-LABEL: maxmin_hu:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.hu $xr2, $xr0, $xr1
 ; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <16 x i16> @llvm.umax.v16i16(<16 x i16> %0, <16 x i16> %1)
-  %b = tail call <16 x i16> @llvm.umin.v16i16(<16 x i16> %0, <16 x i16> %1)
+  %a = tail call <16 x i16> @llvm.umax.v8i16(<16 x i16> %0, <16 x i16> %1)
+  %b = tail call <16 x i16> @llvm.umin.v8i16(<16 x i16> %0, <16 x i16> %1)
   %sub = sub <16 x i16> %a, %b
   ret <16 x i16> %sub
 }
 
-define <8 x i32> @umaxmin_v8i32(<8 x i32> %0, <8 x i32> %1) {
-; CHECK-LABEL: umaxmin_v8i32:
+define <8 x i32> @maxmin_wu(<8 x i32> %0, <8 x i32> %1) {
+; CHECK-LABEL: maxmin_wu:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.wu $xr2, $xr0, $xr1
 ; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <8 x i32> @llvm.umax.v8i32(<8 x i32> %0, <8 x i32> %1)
-  %b = tail call <8 x i32> @llvm.umin.v8i32(<8 x i32> %0, <8 x i32> %1)
+  %a = tail call <8 x i32> @llvm.umax.v4i32(<8 x i32> %0, <8 x i32> %1)
+  %b = tail call <8 x i32> @llvm.umin.v4i32(<8 x i32> %0, <8 x i32> %1)
   %sub = sub <8 x i32> %a, %b
   ret <8 x i32> %sub
 }
 
-define <4 x i64> @umaxmin_v4i64(<4 x i64> %0, <4 x i64> %1) {
-; CHECK-LABEL: umaxmin_v4i64:
+define <4 x i64> @maxmin_du(<4 x i64> %0, <4 x i64> %1) {
+; CHECK-LABEL: maxmin_du:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.du $xr2, $xr0, $xr1
 ; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <4 x i64> @llvm.umax.v4i64(<4 x i64> %0, <4 x i64> %1)
-  %b = tail call <4 x i64> @llvm.umin.v4i64(<4 x i64> %0, <4 x i64> %1)
+  %a = tail call <4 x i64> @llvm.umax.v2i64(<4 x i64> %0, <4 x i64> %1)
+  %b = tail call <4 x i64> @llvm.umin.v2i64(<4 x i64> %0, <4 x i64> %1)
   %sub = sub <4 x i64> %a, %b
   ret <4 x i64> %sub
 }
 
-define <32 x i8> @umaxmin_v32i8_com1(<32 x i8> %0, <32 x i8> %1) {
-; CHECK-LABEL: umaxmin_v32i8_com1:
+define <32 x i8> @maxmin_bu_com1(<32 x i8> %0, <32 x i8> %1) {
+; CHECK-LABEL: maxmin_bu_com1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmin.bu $xr2, $xr0, $xr1
 ; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <32 x i8> @llvm.umax.v32i8(<32 x i8> %0, <32 x i8> %1)
-  %b = tail call <32 x i8> @llvm.umin.v32i8(<32 x i8> %1, <32 x i8> %0)
+  %a = tail call <32 x i8> @llvm.umax.v16i8(<32 x i8> %0, <32 x i8> %1)
+  %b = tail call <32 x i8> @llvm.umin.v16i8(<32 x i8> %1, <32 x i8> %0)
   %sub = sub <32 x i8> %a, %b
   ret <32 x i8> %sub
 }
 
-declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1)
+;; select(icmp(a,b),sub(a,b),sub(b,a)) -> abds(a,b)
+define <32 x i8> @xvabsd_b_cmp(<32 x i8> %a, <32 x i8> %b) nounwind {
+; CHECK-LABEL: xvabsd_b_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.b $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp slt <32 x i8> %a, %b
+  %ab = sub <32 x i8> %a, %b
+  %ba = sub <32 x i8> %b, %a
+  %sel = select <32 x i1> %cmp, <32 x i8> %ba, <32 x i8> %ab
+  ret <32 x i8> %sel
+}
+
+define <16 x i16> @xvabsd_h_cmp(<16 x i16> %a, <16 x i16> %b) nounwind {
+; CHECK-LABEL: xvabsd_h_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.h $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp slt <16 x i16> %a, %b
+  %ab = sub <16 x i16> %a, %b
+  %ba = sub <16 x i16> %b, %a
+  %sel = select <16 x i1> %cmp, <16 x i16> %ba, <16 x i16> %ab
+  ret <16 x i16> %sel
+}
+
+define <8 x i32> @xvabsd_w_cmp(<8 x i32> %a, <8 x i32> %b) nounwind {
+; CHECK-LABEL: xvabsd_w_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.w $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp slt <8 x i32> %a, %b
+  %ab = sub <8 x i32> %a, %b
+  %ba = sub <8 x i32> %b, %a
+  %sel = select <8 x i1> %cmp, <8 x i32> %ba, <8 x i32> %ab
+  ret <8 x i32> %sel
+}
+
+define <4 x i64> @xvabsd_d_cmp(<4 x i64> %a, <4 x i64> %b) nounwind {
+; CHECK-LABEL: xvabsd_d_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.d $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp slt <4 x i64> %a, %b
+  %ab = sub <4 x i64> %a, %b
+  %ba = sub <4 x i64> %b, %a
+  %sel = select <4 x i1> %cmp, <4 x i64> %ba, <4 x i64> %ab
+  ret <4 x i64> %sel
+}
+
+define <32 x i8> @xvabsd_bu_cmp(<32 x i8> %a, <32 x i8> %b) nounwind {
+; CHECK-LABEL: xvabsd_bu_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.bu $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <32 x i8> %a, %b
+  %ab = sub <32 x i8> %a, %b
+  %ba = sub <32 x i8> %b, %a
+  %sel = select <32 x i1> %cmp, <32 x i8> %ba, <32 x i8> %ab
+  ret <32 x i8> %sel
+}
+
+define <16 x i16> @xvabsd_hu_cmp(<16 x i16> %a, <16 x i16> %b) nounwind {
+; CHECK-LABEL: xvabsd_hu_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.hu $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <16 x i16> %a, %b
+  %ab = sub <16 x i16> %a, %b
+  %ba = sub <16 x i16> %b, %a
+  %sel = select <16 x i1> %cmp, <16 x i16> %ba, <16 x i16> %ab
+  ret <16 x i16> %sel
+}
+
+define <8 x i32> @xvabsd_wu_cmp(<8 x i32> %a, <8 x i32> %b) nounwind {
+; CHECK-LABEL: xvabsd_wu_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.wu $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <8 x i32> %a, %b
+  %ab = sub <8 x i32> %a, %b
+  %ba = sub <8 x i32> %b, %a
+  %sel = select <8 x i1> %cmp, <8 x i32> %ba, <8 x i32> %ab
+  ret <8 x i32> %sel
+}
+
+define <4 x i64> @xvabsd_du_cmp(<4 x i64> %a, <4 x i64> %b) nounwind {
+; CHECK-LABEL: xvabsd_du_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.du $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <4 x i64> %a, %b
+  %ab = sub <4 x i64> %a, %b
+  %ba = sub <4 x i64> %b, %a
+  %sel = select <4 x i1> %cmp, <4 x i64> %ba, <4 x i64> %ab
+  ret <4 x i64> %sel
+}
+
+;; sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abds(a,b)
+define <32 x i8> @xvabsd_b_select(<32 x i8> %a, <32 x i8> %b) nounwind {
+; CHECK-LABEL: xvabsd_b_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.b $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp slt <32 x i8> %a, %b
+  %ab = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
+  %ba = select <32 x i1> %cmp, <32 x i8> %b, <32 x i8> %a
+  %sub = sub <32 x i8> %ba, %ab
+  ret <32 x i8> %sub
+}
+
+define <16 x i16> @xvabsd_h_select(<16 x i16> %a, <16 x i16> %b) nounwind {
+; CHECK-LABEL: xvabsd_h_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.h $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp sle <16 x i16> %a, %b
+  %ab = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
+  %ba = select <16 x i1> %cmp, <16 x i16> %b, <16 x i16> %a
+  %sub = sub <16 x i16> %ba, %ab
+  ret <16 x i16> %sub
+}
+
+define <8 x i32> @xvabsd_w_select(<8 x i32> %a, <8 x i32> %b) nounwind {
+; CHECK-LABEL: xvabsd_w_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.w $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp sgt <8 x i32> %a, %b
+  %ab = select <8 x i1> %cmp, <8 x i32> %a, <8 x i32> %b
+  %ba = select <8 x i1> %cmp, <8 x i32> %b, <8 x i32> %a
+  %sub = sub <8 x i32> %ab, %ba
+  ret <8 x i32> %sub
+}
+
+define <4 x i64> @xvabsd_d_select(<4 x i64> %a, <4 x i64> %b) nounwind {
+; CHECK-LABEL: xvabsd_d_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.d $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp sge <4 x i64> %a, %b
+  %ab = select <4 x i1> %cmp, <4 x i64> %a, <4 x i64> %b
+  %ba = select <4 x i1> %cmp, <4 x i64> %b, <4 x i64> %a
+  %sub = sub <4 x i64> %ab, %ba
+  ret <4 x i64> %sub
+}
+
+define <32 x i8> @xvabsd_bu_select(<32 x i8> %a, <32 x i8> %b) nounwind {
+; CHECK-LABEL: xvabsd_bu_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.bu $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <32 x i8> %a, %b
+  %ab = select <32 x i1> %cmp, <32 x i8> %a, <32 x i8> %b
+  %ba = select <32 x i1> %cmp, <32 x i8> %b, <32 x i8> %a
+  %sub = sub <32 x i8> %ba, %ab
+  ret <32 x i8> %sub
+}
+
+define <16 x i16> @xvabsd_hu_select(<16 x i16> %a, <16 x i16> %b) nounwind {
+; CHECK-LABEL: xvabsd_hu_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.hu $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp ule <16 x i16> %a, %b
+  %ab = select <16 x i1> %cmp, <16 x i16> %a, <16 x i16> %b
+  %ba = select <16 x i1> %cmp, <16 x i16> %b, <16 x i16> %a
+  %sub = sub <16 x i16> %ba, %ab
+  ret <16 x i16> %sub
+}
+
+define <8 x i32> @xvabsd_wu_select(<8 x i32> %a, <8 x i32> %b) nounwind {
+; CHECK-LABEL: xvabsd_wu_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.wu $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp ugt <8 x i32> %a, %b
+  %ab = select <8 x i1> %cmp, <8 x i32> %a, <8 x i32> %b
+  %ba = select <8 x i1> %cmp, <8 x i32> %b, <8 x i32> %a
+  %sub = sub <8 x i32> %ab, %ba
+  ret <8 x i32> %sub
+}
+
+define <4 x i64> @xvabsd_du_select(<4 x i64> %a, <4 x i64> %b) nounwind {
+; CHECK-LABEL: xvabsd_du_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmin.du $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT:    ret
+  %cmp = icmp uge <4 x i64> %a, %b
+  %ab = select <4 x i1> %cmp, <4 x i64> %a, <4 x i64> %b
+  %ba = select <4 x i1> %cmp, <4 x i64> %b, <4 x i64> %a
+  %sub = sub <4 x i64> %ab, %ba
+  ret <4 x i64> %sub
+}
+
+declare <32 x i8> @llvm.abs.v16i8(<32 x i8>, i1)
 
-declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
-declare <32 x i16> @llvm.abs.v32i16(<32 x i16>, i1)
+declare <16 x i16> @llvm.abs.v8i16(<16 x i16>, i1)
+declare <32 x i16> @llvm.abs.v16i16(<32 x i16>, i1)
 
-declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
-declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1)
+declare <8 x i32> @llvm.abs.v4i32(<8 x i32>, i1)
+declare <16 x i32> @llvm.abs.v8i32(<16 x i32>, i1)
 
-declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
-declare <8 x i64> @llvm.abs.v8i64(<8 x i64>, i1)
+declare <4 x i64> @llvm.abs.v2i64(<4 x i64>, i1)
+declare <8 x i64> @llvm.abs.v4i64(<8 x i64>, i1)
 
-declare <4 x i128> @llvm.abs.v4i128(<4 x i128>, i1)
+declare <4 x i128> @llvm.abs.v2i128(<4 x i128>, i1)
 
-declare <32 x i8> @llvm.smax.v32i8(<32 x i8>, <32 x i8>)
-declare <16 x i16> @llvm.smax.v16i16(<16 x i16>, <16 x i16>)
-declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
-declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
-declare <32 x i8> @llvm.smin.v32i8(<32 x i8>, <32 x i8>)
-declare <16 x i16> @llvm.smin.v16i16(<16 x i16>, <16 x i16>)
-declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
-declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
-declare <32 x i8> @llvm.umax.v32i8(<32 x i8>, <32 x i8>)
-declare <16 x i16> @llvm.umax.v16i16(<16 x i16>, <16 x i16>)
-declare <8 x i32> @llvm.umax.v8i32(<8 x i32>, <8 x i32>)
-declare <4 x i64> @llvm.umax.v4i64(<4 x i64>, <4 x i64>)
-declare <32 x i8> @llvm.umin.v32i8(<32 x i8>, <32 x i8>)
-declare <16 x i16> @llvm.umin.v16i16(<16 x i16>, <16 x i16>)
-declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>)
-declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)
+declare <32 x i8> @llvm.smax.v16i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.smax.v8i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.smax.v4i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.smax.v2i64(<4 x i64>, <4 x i64>)
+declare <32 x i8> @llvm.smin.v16i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.smin.v8i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.smin.v4i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.smin.v2i64(<4 x i64>, <4 x i64>)
+declare <32 x i8> @llvm.umax.v16i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.umax.v8i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.umax.v4i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.umax.v2i64(<4 x i64>, <4 x i64>)
+declare <32 x i8> @llvm.umin.v16i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.umin.v8i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.umin.v4i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.umin.v2i64(<4 x i64>, <4 x i64>)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll
index 63c3f88ce6aa0..d83e218c5e604 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll
@@ -1,11 +1,18 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s
 
-;; Mostly copied from AArch64/neon-abd.ll
+;; TODO: Currently LoongArch generates suboptimal code for these five cases:
+;; 1. trunc(abs(sub(sext(a),sext(b)))) -> abds(a,b) or abdu(a,b)
+;; 2. abs(sub_nsw(a,b)) -> abds(a,b)
+;; 3. sub(smax(a,b),smin(a,b)) -> abds(a,b) or abdu(a,b)
+;; 4. select(icmp(a,b),sub(a,b),sub(b,a)) -> abds(a,b) or abdu(a,b)
+;; 5. sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abds(a,b) or abdu(a,b)
+;;
+;; abds / abdu can be lowered to the vabsd.{b/h/w/d} / vabsd.{b/h/w/d}u instructions.
+;;
+;; A later patch will address this; a sketch of the expected lowering follows.
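+;;
+;; For example (a sketch, assuming abds is matched for case 2):
+;; abs(sub_nsw(a,b)) currently lowers to the three-instruction sequence
+;;   vsub.b  $vr0, $vr0, $vr1
+;;   vneg.b  $vr1, $vr0
+;;   vmax.b  $vr0, $vr0, $vr1
+;; (see vabsd_b_nsw below), which could instead be the single instruction
+;;   vabsd.b $vr0, $vr0, $vr1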
 
-;
-; VABDS_[B/H/W/D]
-;
+;; trunc(abs(sub(sext(a),sext(b)))) -> abds(a,b)
 define <16 x i8> @vabsd_b(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-LABEL: vabsd_b:
 ; CHECK:       # %bb.0:
@@ -66,10 +73,6 @@ define <2 x i64> @vabsd_d(<2 x i64> %a, <2 x i64> %b) {
   ret <2 x i64> %trunc
 }
 
-;
-; VABSD_[B/H/W/D]U
-;
-
 define <16 x i8> @vabsd_bu(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-LABEL: vabsd_bu:
 ; CHECK:       # %bb.0:
@@ -130,8 +133,9 @@ define <2 x i64> @vabsd_du(<2 x i64> %a, <2 x i64> %b) {
   ret <2 x i64> %trunc
 }
 
-define <16 x i8> @vabsd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) {
-; CHECK-LABEL: vabsd_v16i8_nsw:
+;; abs(sub_nsw(a,b)) -> abds(a,b)
+define <16 x i8> @vabsd_b_nsw(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vabsd_b_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsub.b $vr0, $vr0, $vr1
 ; CHECK-NEXT:    vneg.b $vr1, $vr0
@@ -142,8 +146,8 @@ define <16 x i8> @vabsd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) {
   ret <16 x i8> %abs
 }
 
-define <8 x i16> @vabsd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) {
-; CHECK-LABEL: vabsd_v8i16_nsw:
+define <8 x i16> @vabsd_h_nsw(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vabsd_h_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsub.h $vr0, $vr0, $vr1
 ; CHECK-NEXT:    vneg.h $vr1, $vr0
@@ -154,8 +158,8 @@ define <8 x i16> @vabsd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) {
   ret <8 x i16> %abs
 }
 
-define <4 x i32> @vabsd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) {
-; CHECK-LABEL: vabsd_v4i32_nsw:
+define <4 x i32> @vabsd_w_nsw(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vabsd_w_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsub.w $vr0, $vr0, $vr1
 ; CHECK-NEXT:    vneg.w $vr1, $vr0
@@ -166,8 +170,8 @@ define <4 x i32> @vabsd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) {
   ret <4 x i32> %abs
 }
 
-define <2 x i64> @vabsd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-LABEL: vabsd_v2i64_nsw:
+define <2 x i64> @vabsd_d_nsw(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vabsd_d_nsw:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsub.d $vr0, $vr0, $vr1
 ; CHECK-NEXT:    vneg.d $vr1, $vr0
@@ -178,8 +182,9 @@ define <2 x i64> @vabsd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) {
   ret <2 x i64> %abs
 }
 
-define <16 x i8> @smaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
-; CHECK-LABEL: smaxmin_v16i8:
+;; sub(smax(a,b),smin(a,b)) -> abds(a,b)
+define <16 x i8> @maxmin_b(<16 x i8> %0, <16 x i8> %1) {
+; CHECK-LABEL: maxmin_b:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.b $vr2, $vr0, $vr1
 ; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
@@ -191,8 +196,8 @@ define <16 x i8> @smaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
   ret <16 x i8> %sub
 }
 
-define <8 x i16> @smaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
-; CHECK-LABEL: smaxmin_v8i16:
+define <8 x i16> @maxmin_h(<8 x i16> %0, <8 x i16> %1) {
+; CHECK-LABEL: maxmin_h:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.h $vr2, $vr0, $vr1
 ; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
@@ -204,8 +209,8 @@ define <8 x i16> @smaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
   ret <8 x i16> %sub
 }
 
-define <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
-; CHECK-LABEL: smaxmin_v4i32:
+define <4 x i32> @maxmin_w(<4 x i32> %0, <4 x i32> %1) {
+; CHECK-LABEL: maxmin_w:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.w $vr2, $vr0, $vr1
 ; CHECK-NEXT:    vmax.w $vr0, $vr0, $vr1
@@ -217,8 +222,8 @@ define <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
   ret <4 x i32> %sub
 }
 
-define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
-; CHECK-LABEL: smaxmin_v2i64:
+define <2 x i64> @maxmin_d(<2 x i64> %0, <2 x i64> %1) {
+; CHECK-LABEL: maxmin_d:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.d $vr2, $vr0, $vr1
 ; CHECK-NEXT:    vmax.d $vr0, $vr0, $vr1
@@ -230,8 +235,8 @@ define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
   ret <2 x i64> %sub
 }
 
-define <16 x i8> @umaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
-; CHECK-LABEL: umaxmin_v16i8:
+define <16 x i8> @maxmin_bu(<16 x i8> %0, <16 x i8> %1) {
+; CHECK-LABEL: maxmin_bu:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.bu $vr2, $vr0, $vr1
 ; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
@@ -243,8 +248,8 @@ define <16 x i8> @umaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
   ret <16 x i8> %sub
 }
 
-define <8 x i16> @umaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
-; CHECK-LABEL: umaxmin_v8i16:
+define <8 x i16> @maxmin_hu(<8 x i16> %0, <8 x i16> %1) {
+; CHECK-LABEL: maxmin_hu:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.hu $vr2, $vr0, $vr1
 ; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
@@ -256,8 +261,8 @@ define <8 x i16> @umaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
   ret <8 x i16> %sub
 }
 
-define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
-; CHECK-LABEL: umaxmin_v4i32:
+define <4 x i32> @maxmin_wu(<4 x i32> %0, <4 x i32> %1) {
+; CHECK-LABEL: maxmin_wu:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.wu $vr2, $vr0, $vr1
 ; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
@@ -269,8 +274,8 @@ define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
   ret <4 x i32> %sub
 }
 
-define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
-; CHECK-LABEL: umaxmin_v2i64:
+define <2 x i64> @maxmin_du(<2 x i64> %0, <2 x i64> %1) {
+; CHECK-LABEL: maxmin_du:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.du $vr2, $vr0, $vr1
 ; CHECK-NEXT:    vmax.du $vr0, $vr0, $vr1
@@ -282,8 +287,8 @@ define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
   ret <2 x i64> %sub
 }
 
-define <16 x i8> @umaxmin_v16i8_com1(<16 x i8> %0, <16 x i8> %1) {
-; CHECK-LABEL: umaxmin_v16i8_com1:
+define <16 x i8> @maxmin_bu_com1(<16 x i8> %0, <16 x i8> %1) {
+; CHECK-LABEL: maxmin_bu_com1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmin.bu $vr2, $vr0, $vr1
 ; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
@@ -295,6 +300,232 @@ define <16 x i8> @umaxmin_v16i8_com1(<16 x i8> %0, <16 x i8> %1) {
   ret <16 x i8> %sub
 }
 
+;; select(icmp(a,b),sub(a,b),sub(b,a)) -> abds(a,b)
+define <16 x i8> @vabsd_b_cmp(<16 x i8> %a, <16 x i8> %b) nounwind {
+; CHECK-LABEL: vabsd_b_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.b $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.b $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp slt <16 x i8> %a, %b
+  %ab = sub <16 x i8> %a, %b
+  %ba = sub <16 x i8> %b, %a
+  %sel = select <16 x i1> %cmp, <16 x i8> %ba, <16 x i8> %ab
+  ret <16 x i8> %sel
+}
+
+define <8 x i16> @vabsd_h_cmp(<8 x i16> %a, <8 x i16> %b) nounwind {
+; CHECK-LABEL: vabsd_h_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.h $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.h $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp slt <8 x i16> %a, %b
+  %ab = sub <8 x i16> %a, %b
+  %ba = sub <8 x i16> %b, %a
+  %sel = select <8 x i1> %cmp, <8 x i16> %ba, <8 x i16> %ab
+  ret <8 x i16> %sel
+}
+
+define <4 x i32> @vabsd_w_cmp(<4 x i32> %a, <4 x i32> %b) nounwind {
+; CHECK-LABEL: vabsd_w_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.w $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.w $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp slt <4 x i32> %a, %b
+  %ab = sub <4 x i32> %a, %b
+  %ba = sub <4 x i32> %b, %a
+  %sel = select <4 x i1> %cmp, <4 x i32> %ba, <4 x i32> %ab
+  ret <4 x i32> %sel
+}
+
+define <2 x i64> @vabsd_d_cmp(<2 x i64> %a, <2 x i64> %b) nounwind {
+; CHECK-LABEL: vabsd_d_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.d $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.d $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp slt <2 x i64> %a, %b
+  %ab = sub <2 x i64> %a, %b
+  %ba = sub <2 x i64> %b, %a
+  %sel = select <2 x i1> %cmp, <2 x i64> %ba, <2 x i64> %ab
+  ret <2 x i64> %sel
+}
+
+define <16 x i8> @vabsd_bu_cmp(<16 x i8> %a, <16 x i8> %b) nounwind {
+; CHECK-LABEL: vabsd_bu_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.bu $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.b $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <16 x i8> %a, %b
+  %ab = sub <16 x i8> %a, %b
+  %ba = sub <16 x i8> %b, %a
+  %sel = select <16 x i1> %cmp, <16 x i8> %ba, <16 x i8> %ab
+  ret <16 x i8> %sel
+}
+
+define <8 x i16> @vabsd_hu_cmp(<8 x i16> %a, <8 x i16> %b) nounwind {
+; CHECK-LABEL: vabsd_hu_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.hu $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.h $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <8 x i16> %a, %b
+  %ab = sub <8 x i16> %a, %b
+  %ba = sub <8 x i16> %b, %a
+  %sel = select <8 x i1> %cmp, <8 x i16> %ba, <8 x i16> %ab
+  ret <8 x i16> %sel
+}
+
+define <4 x i32> @vabsd_wu_cmp(<4 x i32> %a, <4 x i32> %b) nounwind {
+; CHECK-LABEL: vabsd_wu_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.wu $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.w $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <4 x i32> %a, %b
+  %ab = sub <4 x i32> %a, %b
+  %ba = sub <4 x i32> %b, %a
+  %sel = select <4 x i1> %cmp, <4 x i32> %ba, <4 x i32> %ab
+  ret <4 x i32> %sel
+}
+
+define <2 x i64> @vabsd_du_cmp(<2 x i64> %a, <2 x i64> %b) nounwind {
+; CHECK-LABEL: vabsd_du_cmp:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.du $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.d $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <2 x i64> %a, %b
+  %ab = sub <2 x i64> %a, %b
+  %ba = sub <2 x i64> %b, %a
+  %sel = select <2 x i1> %cmp, <2 x i64> %ba, <2 x i64> %ab
+  ret <2 x i64> %sel
+}
+
+;; sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abds(a,b)
+define <16 x i8> @vabsd_b_select(<16 x i8> %a, <16 x i8> %b) nounwind {
+; CHECK-LABEL: vabsd_b_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.b $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.b $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp slt <16 x i8> %a, %b
+  %ab = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
+  %ba = select <16 x i1> %cmp, <16 x i8> %b, <16 x i8> %a
+  %sub = sub <16 x i8> %ba, %ab
+  ret <16 x i8> %sub
+}
+
+define <8 x i16> @vabsd_h_select(<8 x i16> %a, <8 x i16> %b) nounwind {
+; CHECK-LABEL: vabsd_h_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.h $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.h $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp sle <8 x i16> %a, %b
+  %ab = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
+  %ba = select <8 x i1> %cmp, <8 x i16> %b, <8 x i16> %a
+  %sub = sub <8 x i16> %ba, %ab
+  ret <8 x i16> %sub
+}
+
+define <4 x i32> @vabsd_w_select(<4 x i32> %a, <4 x i32> %b) nounwind {
+; CHECK-LABEL: vabsd_w_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.w $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.w $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp sgt <4 x i32> %a, %b
+  %ab = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
+  %ba = select <4 x i1> %cmp, <4 x i32> %b, <4 x i32> %a
+  %sub = sub <4 x i32> %ab, %ba
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @vabsd_d_select(<2 x i64> %a, <2 x i64> %b) nounwind {
+; CHECK-LABEL: vabsd_d_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.d $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.d $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp sge <2 x i64> %a, %b
+  %ab = select <2 x i1> %cmp, <2 x i64> %a, <2 x i64> %b
+  %ba = select <2 x i1> %cmp, <2 x i64> %b, <2 x i64> %a
+  %sub = sub <2 x i64> %ab, %ba
+  ret <2 x i64> %sub
+}
+
+define <16 x i8> @vabsd_bu_select(<16 x i8> %a, <16 x i8> %b) nounwind {
+; CHECK-LABEL: vabsd_bu_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.bu $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.b $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <16 x i8> %a, %b
+  %ab = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
+  %ba = select <16 x i1> %cmp, <16 x i8> %b, <16 x i8> %a
+  %sub = sub <16 x i8> %ba, %ab
+  ret <16 x i8> %sub
+}
+
+define <8 x i16> @vabsd_hu_select(<8 x i16> %a, <8 x i16> %b) nounwind {
+; CHECK-LABEL: vabsd_hu_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.hu $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.h $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp ule <8 x i16> %a, %b
+  %ab = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
+  %ba = select <8 x i1> %cmp, <8 x i16> %b, <8 x i16> %a
+  %sub = sub <8 x i16> %ba, %ab
+  ret <8 x i16> %sub
+}
+
+define <4 x i32> @vabsd_wu_select(<4 x i32> %a, <4 x i32> %b) nounwind {
+; CHECK-LABEL: vabsd_wu_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.wu $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.w $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp ugt <4 x i32> %a, %b
+  %ab = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
+  %ba = select <4 x i1> %cmp, <4 x i32> %b, <4 x i32> %a
+  %sub = sub <4 x i32> %ab, %ba
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @vabsd_du_select(<2 x i64> %a, <2 x i64> %b) nounwind {
+; CHECK-LABEL: vabsd_du_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmin.du $vr2, $vr0, $vr1
+; CHECK-NEXT:    vmax.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.d $vr0, $vr0, $vr2
+; CHECK-NEXT:    ret
+  %cmp = icmp uge <2 x i64> %a, %b
+  %ab = select <2 x i1> %cmp, <2 x i64> %a, <2 x i64> %b
+  %ba = select <2 x i1> %cmp, <2 x i64> %b, <2 x i64> %a
+  %sub = sub <2 x i64> %ab, %ba
+  ret <2 x i64> %sub
+}
+
 declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1)
 
 declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1)

>From fd6e3aafbf9a3abf48ae3f0b0ece8cdfc5ea9d19 Mon Sep 17 00:00:00 2001
From: tangaac <tangyan01 at loongson.cn>
Date: Wed, 26 Mar 2025 16:08:17 +0800
Subject: [PATCH 5/5] Fix errors in intrinsic suffixes
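
The LASX declarations and call sites used LSX-width overload suffixes, e.g.

  declare <32 x i8> @llvm.abs.v16i8(<32 x i8>, i1)

but the suffix of an overloaded intrinsic must match the actual vector type,
i.e. @llvm.abs.v32i8 for <32 x i8>. This commit corrects the suffixes
throughout the LASX test.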

---
 .../LoongArch/lasx/ir-instruction/absd.ll     | 108 +++++++++---------
 1 file changed, 54 insertions(+), 54 deletions(-)

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
index a24a4165e00d5..a81a75f2136d9 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
@@ -23,7 +23,7 @@ define <32 x i8> @xvabsd_b(<32 x i8> %a, <32 x i8> %b) {
   %a.sext = sext <32 x i8> %a to <32 x i16>
   %b.sext = sext <32 x i8> %b to <32 x i16>
   %sub = sub <32 x i16> %a.sext, %b.sext
-  %abs = call <32 x i16> @llvm.abs.v16i16(<32 x i16> %sub, i1 true)
+  %abs = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %sub, i1 true)
   %trunc = trunc <32 x i16> %abs to <32 x i8>
   ret <32 x i8> %trunc
 }
@@ -38,7 +38,7 @@ define <16 x i16> @xvabsd_h(<16 x i16> %a, <16 x i16> %b) {
   %a.sext = sext <16 x i16> %a to <16 x i32>
   %b.sext = sext <16 x i16> %b to <16 x i32>
   %sub = sub <16 x i32> %a.sext, %b.sext
-  %abs = call <16 x i32> @llvm.abs.v8i32(<16 x i32> %sub, i1 true)
+  %abs = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %sub, i1 true)
   %trunc = trunc <16 x i32> %abs to <16 x i16>
   ret <16 x i16> %trunc
 }
@@ -53,7 +53,7 @@ define <8 x i32> @xvabsd_w(<8 x i32> %a, <8 x i32> %b) {
   %a.sext = sext <8 x i32> %a to <8 x i64>
   %b.sext = sext <8 x i32> %b to <8 x i64>
   %sub = sub <8 x i64> %a.sext, %b.sext
-  %abs = call <8 x i64> @llvm.abs.v4i64(<8 x i64> %sub, i1 true)
+  %abs = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %sub, i1 true)
   %trunc = trunc <8 x i64> %abs to <8 x i32>
   ret <8 x i32> %trunc
 }
@@ -68,7 +68,7 @@ define <4 x i64> @xvabsd_d(<4 x i64> %a, <4 x i64> %b) {
   %a.sext = sext <4 x i64> %a to <4 x i128>
   %b.sext = sext <4 x i64> %b to <4 x i128>
   %sub = sub <4 x i128> %a.sext, %b.sext
-  %abs = call <4 x i128> @llvm.abs.v2i128(<4 x i128> %sub, i1 true)
+  %abs = call <4 x i128> @llvm.abs.v4i128(<4 x i128> %sub, i1 true)
   %trunc = trunc <4 x i128> %abs to <4 x i64>
   ret <4 x i64> %trunc
 }
@@ -83,7 +83,7 @@ define <32 x i8> @xvabsd_bu(<32 x i8> %a, <32 x i8> %b) {
   %a.zext = zext <32 x i8> %a to <32 x i16>
   %b.zext = zext <32 x i8> %b to <32 x i16>
   %sub = sub <32 x i16> %a.zext, %b.zext
-  %abs = call <32 x i16> @llvm.abs.v16i16(<32 x i16> %sub, i1 true)
+  %abs = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %sub, i1 true)
   %trunc = trunc <32 x i16> %abs to <32 x i8>
   ret <32 x i8> %trunc
 }
@@ -98,7 +98,7 @@ define <16 x i16> @xvabsd_hu(<16 x i16> %a, <16 x i16> %b) {
   %a.zext = zext <16 x i16> %a to <16 x i32>
   %b.zext = zext <16 x i16> %b to <16 x i32>
   %sub = sub <16 x i32> %a.zext, %b.zext
-  %abs = call <16 x i32> @llvm.abs.v8i32(<16 x i32> %sub, i1 true)
+  %abs = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %sub, i1 true)
   %trunc = trunc <16 x i32> %abs to <16 x i16>
   ret <16 x i16> %trunc
 }
@@ -113,7 +113,7 @@ define <8 x i32> @xvabsd_wu(<8 x i32> %a, <8 x i32> %b) {
   %a.zext = zext <8 x i32> %a to <8 x i64>
   %b.zext = zext <8 x i32> %b to <8 x i64>
   %sub = sub <8 x i64> %a.zext, %b.zext
-  %abs = call <8 x i64> @llvm.abs.v4i64(<8 x i64> %sub, i1 true)
+  %abs = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %sub, i1 true)
   %trunc = trunc <8 x i64> %abs to <8 x i32>
   ret <8 x i32> %trunc
 }
@@ -128,7 +128,7 @@ define <4 x i64> @xvabsd_du(<4 x i64> %a, <4 x i64> %b) {
   %a.zext = zext <4 x i64> %a to <4 x i128>
   %b.zext = zext <4 x i64> %b to <4 x i128>
   %sub = sub <4 x i128> %a.zext, %b.zext
-  %abs = call <4 x i128> @llvm.abs.v2i128(<4 x i128> %sub, i1 true)
+  %abs = call <4 x i128> @llvm.abs.v4i128(<4 x i128> %sub, i1 true)
   %trunc = trunc <4 x i128> %abs to <4 x i64>
   ret <4 x i64> %trunc
 }
@@ -142,7 +142,7 @@ define <32 x i8> @xvabsd_b_nsw(<32 x i8> %a, <32 x i8> %b) {
 ; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %sub = sub nsw <32 x i8> %a, %b
-  %abs = call <32 x i8> @llvm.abs.v16i8(<32 x i8> %sub, i1 true)
+  %abs = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %sub, i1 true)
   ret <32 x i8> %abs
 }
 
@@ -154,7 +154,7 @@ define <16 x i16> @xvabsd_h_nsw(<16 x i16> %a, <16 x i16> %b) {
 ; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %sub = sub nsw <16 x i16> %a, %b
-  %abs = call <16 x i16> @llvm.abs.v8i16(<16 x i16> %sub, i1 true)
+  %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
   ret <16 x i16> %abs
 }
 
@@ -166,7 +166,7 @@ define <8 x i32> @xvabsd_w_nsw(<8 x i32> %a, <8 x i32> %b) {
 ; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %sub = sub nsw <8 x i32> %a, %b
-  %abs = call <8 x i32> @llvm.abs.v4i32(<8 x i32> %sub, i1 true)
+  %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
   ret <8 x i32> %abs
 }
 
@@ -178,7 +178,7 @@ define <4 x i64> @xvabsd_d_nsw(<4 x i64> %a, <4 x i64> %b) {
 ; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
 ; CHECK-NEXT:    ret
   %sub = sub nsw <4 x i64> %a, %b
-  %abs = call <4 x i64> @llvm.abs.v2i64(<4 x i64> %sub, i1 true)
+  %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
   ret <4 x i64> %abs
 }
 
@@ -190,8 +190,8 @@ define <32 x i8> @maxmin_b(<32 x i8> %0, <32 x i8> %1) {
 ; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <32 x i8> @llvm.smax.v16i8(<32 x i8> %0, <32 x i8> %1)
-  %b = tail call <32 x i8> @llvm.smin.v16i8(<32 x i8> %0, <32 x i8> %1)
+  %a = tail call <32 x i8> @llvm.smax.v32i8(<32 x i8> %0, <32 x i8> %1)
+  %b = tail call <32 x i8> @llvm.smin.v32i8(<32 x i8> %0, <32 x i8> %1)
   %sub = sub <32 x i8> %a, %b
   ret <32 x i8> %sub
 }
@@ -203,8 +203,8 @@ define <16 x i16> @maxmin_h(<16 x i16> %0, <16 x i16> %1) {
 ; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <16 x i16> @llvm.smax.v8i16(<16 x i16> %0, <16 x i16> %1)
-  %b = tail call <16 x i16> @llvm.smin.v8i16(<16 x i16> %0, <16 x i16> %1)
+  %a = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> %0, <16 x i16> %1)
+  %b = tail call <16 x i16> @llvm.smin.v16i16(<16 x i16> %0, <16 x i16> %1)
   %sub = sub <16 x i16> %a, %b
   ret <16 x i16> %sub
 }
@@ -216,8 +216,8 @@ define <8 x i32> @maxmin_w(<8 x i32> %0, <8 x i32> %1) {
 ; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <8 x i32> @llvm.smax.v4i32(<8 x i32> %0, <8 x i32> %1)
-  %b = tail call <8 x i32> @llvm.smin.v4i32(<8 x i32> %0, <8 x i32> %1)
+  %a = tail call <8 x i32> @llvm.smax.v8i32(<8 x i32> %0, <8 x i32> %1)
+  %b = tail call <8 x i32> @llvm.smin.v8i32(<8 x i32> %0, <8 x i32> %1)
   %sub = sub <8 x i32> %a, %b
   ret <8 x i32> %sub
 }
@@ -229,8 +229,8 @@ define <4 x i64> @maxmin_d(<4 x i64> %0, <4 x i64> %1) {
 ; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <4 x i64> @llvm.smax.v2i64(<4 x i64> %0, <4 x i64> %1)
-  %b = tail call <4 x i64> @llvm.smin.v2i64(<4 x i64> %0, <4 x i64> %1)
+  %a = tail call <4 x i64> @llvm.smax.v4i64(<4 x i64> %0, <4 x i64> %1)
+  %b = tail call <4 x i64> @llvm.smin.v4i64(<4 x i64> %0, <4 x i64> %1)
   %sub = sub <4 x i64> %a, %b
   ret <4 x i64> %sub
 }
@@ -242,8 +242,8 @@ define <32 x i8> @maxmin_bu(<32 x i8> %0, <32 x i8> %1) {
 ; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <32 x i8> @llvm.umax.v16i8(<32 x i8> %0, <32 x i8> %1)
-  %b = tail call <32 x i8> @llvm.umin.v16i8(<32 x i8> %0, <32 x i8> %1)
+  %a = tail call <32 x i8> @llvm.umax.v32i8(<32 x i8> %0, <32 x i8> %1)
+  %b = tail call <32 x i8> @llvm.umin.v32i8(<32 x i8> %0, <32 x i8> %1)
   %sub = sub <32 x i8> %a, %b
   ret <32 x i8> %sub
 }
@@ -255,8 +255,8 @@ define <16 x i16> @maxmin_hu(<16 x i16> %0, <16 x i16> %1) {
 ; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <16 x i16> @llvm.umax.v8i16(<16 x i16> %0, <16 x i16> %1)
-  %b = tail call <16 x i16> @llvm.umin.v8i16(<16 x i16> %0, <16 x i16> %1)
+  %a = tail call <16 x i16> @llvm.umax.v16i16(<16 x i16> %0, <16 x i16> %1)
+  %b = tail call <16 x i16> @llvm.umin.v16i16(<16 x i16> %0, <16 x i16> %1)
   %sub = sub <16 x i16> %a, %b
   ret <16 x i16> %sub
 }
@@ -268,8 +268,8 @@ define <8 x i32> @maxmin_wu(<8 x i32> %0, <8 x i32> %1) {
 ; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <8 x i32> @llvm.umax.v4i32(<8 x i32> %0, <8 x i32> %1)
-  %b = tail call <8 x i32> @llvm.umin.v4i32(<8 x i32> %0, <8 x i32> %1)
+  %a = tail call <8 x i32> @llvm.umax.v8i32(<8 x i32> %0, <8 x i32> %1)
+  %b = tail call <8 x i32> @llvm.umin.v8i32(<8 x i32> %0, <8 x i32> %1)
   %sub = sub <8 x i32> %a, %b
   ret <8 x i32> %sub
 }
@@ -281,8 +281,8 @@ define <4 x i64> @maxmin_du(<4 x i64> %0, <4 x i64> %1) {
 ; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <4 x i64> @llvm.umax.v2i64(<4 x i64> %0, <4 x i64> %1)
-  %b = tail call <4 x i64> @llvm.umin.v2i64(<4 x i64> %0, <4 x i64> %1)
+  %a = tail call <4 x i64> @llvm.umax.v4i64(<4 x i64> %0, <4 x i64> %1)
+  %b = tail call <4 x i64> @llvm.umin.v4i64(<4 x i64> %0, <4 x i64> %1)
   %sub = sub <4 x i64> %a, %b
   ret <4 x i64> %sub
 }
@@ -294,8 +294,8 @@ define <32 x i8> @maxmin_bu_com1(<32 x i8> %0, <32 x i8> %1) {
 ; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
 ; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr2
 ; CHECK-NEXT:    ret
-  %a = tail call <32 x i8> @llvm.umax.v16i8(<32 x i8> %0, <32 x i8> %1)
-  %b = tail call <32 x i8> @llvm.umin.v16i8(<32 x i8> %1, <32 x i8> %0)
+  %a = tail call <32 x i8> @llvm.umax.v32i8(<32 x i8> %0, <32 x i8> %1)
+  %b = tail call <32 x i8> @llvm.umin.v32i8(<32 x i8> %1, <32 x i8> %0)
   %sub = sub <32 x i8> %a, %b
   ret <32 x i8> %sub
 }
@@ -526,32 +526,32 @@ define <4 x i64> @xvabsd_du_select(<4 x i64> %a, <4 x i64> %b) nounwind {
   ret <4 x i64> %sub
 }
 
-declare <32 x i8> @llvm.abs.v16i8(<32 x i8>, i1)
+declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1)
 
-declare <16 x i16> @llvm.abs.v8i16(<16 x i16>, i1)
-declare <32 x i16> @llvm.abs.v16i16(<32 x i16>, i1)
+declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
+declare <32 x i16> @llvm.abs.v32i16(<32 x i16>, i1)
 
-declare <8 x i32> @llvm.abs.v4i32(<8 x i32>, i1)
-declare <16 x i32> @llvm.abs.v8i32(<16 x i32>, i1)
+declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
+declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1)
 
-declare <4 x i64> @llvm.abs.v2i64(<4 x i64>, i1)
-declare <8 x i64> @llvm.abs.v4i64(<8 x i64>, i1)
+declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
+declare <8 x i64> @llvm.abs.v8i64(<8 x i64>, i1)
 
-declare <4 x i128> @llvm.abs.v2i128(<4 x i128>, i1)
+declare <4 x i128> @llvm.abs.v4i128(<4 x i128>, i1)
 
-declare <32 x i8> @llvm.smax.v16i8(<32 x i8>, <32 x i8>)
-declare <16 x i16> @llvm.smax.v8i16(<16 x i16>, <16 x i16>)
-declare <8 x i32> @llvm.smax.v4i32(<8 x i32>, <8 x i32>)
-declare <4 x i64> @llvm.smax.v2i64(<4 x i64>, <4 x i64>)
-declare <32 x i8> @llvm.smin.v16i8(<32 x i8>, <32 x i8>)
-declare <16 x i16> @llvm.smin.v8i16(<16 x i16>, <16 x i16>)
-declare <8 x i32> @llvm.smin.v4i32(<8 x i32>, <8 x i32>)
-declare <4 x i64> @llvm.smin.v2i64(<4 x i64>, <4 x i64>)
-declare <32 x i8> @llvm.umax.v16i8(<32 x i8>, <32 x i8>)
-declare <16 x i16> @llvm.umax.v8i16(<16 x i16>, <16 x i16>)
-declare <8 x i32> @llvm.umax.v4i32(<8 x i32>, <8 x i32>)
-declare <4 x i64> @llvm.umax.v2i64(<4 x i64>, <4 x i64>)
-declare <32 x i8> @llvm.umin.v16i8(<32 x i8>, <32 x i8>)
-declare <16 x i16> @llvm.umin.v8i16(<16 x i16>, <16 x i16>)
-declare <8 x i32> @llvm.umin.v4i32(<8 x i32>, <8 x i32>)
-declare <4 x i64> @llvm.umin.v2i64(<4 x i64>, <4 x i64>)
+declare <32 x i8> @llvm.smax.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.smax.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
+declare <32 x i8> @llvm.smin.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.smin.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
+declare <32 x i8> @llvm.umax.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.umax.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.umax.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.umax.v4i64(<4 x i64>, <4 x i64>)
+declare <32 x i8> @llvm.umin.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.umin.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)


