[llvm] [LoongArch] Pre-commit tests for sadd/ssub/uadd/usub intrinsics (PR #158176)
Zhaoxin Yang via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 11 21:01:32 PDT 2025
https://github.com/ylzsx updated https://github.com/llvm/llvm-project/pull/158176
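For context, the intrinsics exercised below saturate instead of wrapping: a signed result is clamped to the type's minimum/maximum and an unsigned result to 0 or the type's maximum. A minimal scalar illustration (not part of this patch, shown only to make the expected semantics concrete):

declare i8 @llvm.sadd.sat.i8(i8, i8)

define i8 @sadd_sat_example() {
  ; 100 + 100 = 200 overflows i8, so the saturating add clamps to 127 (i8 max).
  %r = call i8 @llvm.sadd.sat.i8(i8 100, i8 100)
  ret i8 %r
}

The vector forms tested in this patch (e.g. @llvm.sadd.sat.v32i8) apply the same clamping lane-wise.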
From 7f017ae0ac5312d377d1a1edea72bae968a49953 Mon Sep 17 00:00:00 2001
From: yangzhaoxin <yangzhaoxin at loongson.cn>
Date: Fri, 12 Sep 2025 09:44:03 +0800
Subject: [PATCH] [LoongArch] Pre-commit tests for sadd/ssub/uadd/usub
intrinsics
---
.../LoongArch/lasx/ir-instruction/sadd-sat.ll | 81 +++++++++++++++++
.../LoongArch/lasx/ir-instruction/ssub-sat.ll | 86 +++++++++++++++++++
.../LoongArch/lasx/ir-instruction/uadd-sat.ll | 55 ++++++++++++
.../LoongArch/lasx/ir-instruction/usub-sat.ll | 48 +++++++++++
.../LoongArch/lsx/ir-instruction/sadd-sat.ll | 81 +++++++++++++++++
.../LoongArch/lsx/ir-instruction/ssub-sat.ll | 86 +++++++++++++++++++
.../LoongArch/lsx/ir-instruction/uadd-sat.ll | 55 ++++++++++++
.../LoongArch/lsx/ir-instruction/usub-sat.ll | 48 +++++++++++
8 files changed, 540 insertions(+)
create mode 100644 llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sadd-sat.ll
create mode 100644 llvm/test/CodeGen/LoongArch/lasx/ir-instruction/ssub-sat.ll
create mode 100644 llvm/test/CodeGen/LoongArch/lasx/ir-instruction/uadd-sat.ll
create mode 100644 llvm/test/CodeGen/LoongArch/lasx/ir-instruction/usub-sat.ll
create mode 100644 llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sadd-sat.ll
create mode 100644 llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ssub-sat.ll
create mode 100644 llvm/test/CodeGen/LoongArch/lsx/ir-instruction/uadd-sat.ll
create mode 100644 llvm/test/CodeGen/LoongArch/lsx/ir-instruction/usub-sat.ll
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sadd-sat.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sadd-sat.ll
new file mode 100644
index 0000000000000..f41a68c79ad37
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sadd-sat.ll
@@ -0,0 +1,81 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA32
+; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64
+
+define <32 x i8> @vsadd_b(<32 x i8> %a, <32 x i8> %b) {
+; CHECK-LABEL: vsadd_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvadd.b $xr2, $xr0, $xr1
+; CHECK-NEXT: xvslt.b $xr0, $xr2, $xr0
+; CHECK-NEXT: xvslti.b $xr1, $xr1, 0
+; CHECK-NEXT: xvxor.v $xr0, $xr1, $xr0
+; CHECK-NEXT: xvsrai.b $xr1, $xr2, 7
+; CHECK-NEXT: xvbitrevi.b $xr1, $xr1, 7
+; CHECK-NEXT: xvbitsel.v $xr0, $xr2, $xr1, $xr0
+; CHECK-NEXT: ret
+ %ret = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+ ret <32 x i8> %ret
+}
+
+define <16 x i16> @vsadd_h(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: vsadd_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvadd.h $xr2, $xr0, $xr1
+; CHECK-NEXT: xvslt.h $xr0, $xr2, $xr0
+; CHECK-NEXT: xvslti.h $xr1, $xr1, 0
+; CHECK-NEXT: xvxor.v $xr0, $xr1, $xr0
+; CHECK-NEXT: xvsrai.h $xr1, $xr2, 15
+; CHECK-NEXT: xvbitrevi.h $xr1, $xr1, 15
+; CHECK-NEXT: xvbitsel.v $xr0, $xr2, $xr1, $xr0
+; CHECK-NEXT: ret
+ %ret = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+ ret <16 x i16> %ret
+}
+
+define <8 x i32> @vsadd_w(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: vsadd_w:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvadd.w $xr2, $xr0, $xr1
+; CHECK-NEXT: xvslt.w $xr0, $xr2, $xr0
+; CHECK-NEXT: xvslti.w $xr1, $xr1, 0
+; CHECK-NEXT: xvxor.v $xr0, $xr1, $xr0
+; CHECK-NEXT: xvsrai.w $xr1, $xr2, 31
+; CHECK-NEXT: xvbitrevi.w $xr1, $xr1, 31
+; CHECK-NEXT: xvbitsel.v $xr0, $xr2, $xr1, $xr0
+; CHECK-NEXT: ret
+ %ret = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
+ ret <8 x i32> %ret
+}
+
+define <4 x i64> @vsadd_d(<4 x i64> %a, <4 x i64> %b) {
+; LA32-LABEL: vsadd_d:
+; LA32: # %bb.0:
+; LA32-NEXT: xvadd.d $xr2, $xr0, $xr1
+; LA32-NEXT: xvslt.d $xr0, $xr2, $xr0
+; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA32-NEXT: xvld $xr3, $a0, %pc_lo12(.LCPI3_0)
+; LA32-NEXT: xvslti.d $xr1, $xr1, 0
+; LA32-NEXT: xvxor.v $xr0, $xr1, $xr0
+; LA32-NEXT: xvsrai.d $xr1, $xr2, 63
+; LA32-NEXT: xvxor.v $xr1, $xr1, $xr3
+; LA32-NEXT: xvbitsel.v $xr0, $xr2, $xr1, $xr0
+; LA32-NEXT: ret
+;
+; LA64-LABEL: vsadd_d:
+; LA64: # %bb.0:
+; LA64-NEXT: xvadd.d $xr2, $xr0, $xr1
+; LA64-NEXT: xvslt.d $xr0, $xr2, $xr0
+; LA64-NEXT: xvslti.d $xr1, $xr1, 0
+; LA64-NEXT: xvxor.v $xr0, $xr1, $xr0
+; LA64-NEXT: xvsrai.d $xr1, $xr2, 63
+; LA64-NEXT: xvbitrevi.d $xr1, $xr1, 63
+; LA64-NEXT: xvbitsel.v $xr0, $xr2, $xr1, $xr0
+; LA64-NEXT: ret
+ %ret = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
+ ret <4 x i64> %ret
+}
+
+declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64>, <4 x i64>)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/ssub-sat.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/ssub-sat.ll
new file mode 100644
index 0000000000000..5bef58998216a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/ssub-sat.ll
@@ -0,0 +1,86 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA32
+; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64
+
+define <32 x i8> @vssub_b(<32 x i8> %a, <32 x i8> %b) {
+; CHECK-LABEL: vssub_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvrepli.b $xr2, 0
+; CHECK-NEXT: xvslt.b $xr2, $xr2, $xr1
+; CHECK-NEXT: xvsub.b $xr1, $xr0, $xr1
+; CHECK-NEXT: xvslt.b $xr0, $xr1, $xr0
+; CHECK-NEXT: xvxor.v $xr0, $xr2, $xr0
+; CHECK-NEXT: xvsrai.b $xr2, $xr1, 7
+; CHECK-NEXT: xvbitrevi.b $xr2, $xr2, 7
+; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0
+; CHECK-NEXT: ret
+ %ret = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+ ret <32 x i8> %ret
+}
+
+define <16 x i16> @vssub_h(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: vssub_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvrepli.b $xr2, 0
+; CHECK-NEXT: xvslt.h $xr2, $xr2, $xr1
+; CHECK-NEXT: xvsub.h $xr1, $xr0, $xr1
+; CHECK-NEXT: xvslt.h $xr0, $xr1, $xr0
+; CHECK-NEXT: xvxor.v $xr0, $xr2, $xr0
+; CHECK-NEXT: xvsrai.h $xr2, $xr1, 15
+; CHECK-NEXT: xvbitrevi.h $xr2, $xr2, 15
+; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0
+; CHECK-NEXT: ret
+ %ret = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+ ret <16 x i16> %ret
+}
+
+define <8 x i32> @vssub_w(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: vssub_w:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvrepli.b $xr2, 0
+; CHECK-NEXT: xvslt.w $xr2, $xr2, $xr1
+; CHECK-NEXT: xvsub.w $xr1, $xr0, $xr1
+; CHECK-NEXT: xvslt.w $xr0, $xr1, $xr0
+; CHECK-NEXT: xvxor.v $xr0, $xr2, $xr0
+; CHECK-NEXT: xvsrai.w $xr2, $xr1, 31
+; CHECK-NEXT: xvbitrevi.w $xr2, $xr2, 31
+; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0
+; CHECK-NEXT: ret
+ %ret = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
+ ret <8 x i32> %ret
+}
+
+define <4 x i64> @vssub_d(<4 x i64> %a, <4 x i64> %b) {
+; LA32-LABEL: vssub_d:
+; LA32: # %bb.0:
+; LA32-NEXT: xvrepli.b $xr2, 0
+; LA32-NEXT: xvslt.d $xr2, $xr2, $xr1
+; LA32-NEXT: xvsub.d $xr1, $xr0, $xr1
+; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA32-NEXT: xvld $xr3, $a0, %pc_lo12(.LCPI3_0)
+; LA32-NEXT: xvslt.d $xr0, $xr1, $xr0
+; LA32-NEXT: xvxor.v $xr0, $xr2, $xr0
+; LA32-NEXT: xvsrai.d $xr2, $xr1, 63
+; LA32-NEXT: xvxor.v $xr2, $xr2, $xr3
+; LA32-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0
+; LA32-NEXT: ret
+;
+; LA64-LABEL: vssub_d:
+; LA64: # %bb.0:
+; LA64-NEXT: xvrepli.b $xr2, 0
+; LA64-NEXT: xvslt.d $xr2, $xr2, $xr1
+; LA64-NEXT: xvsub.d $xr1, $xr0, $xr1
+; LA64-NEXT: xvslt.d $xr0, $xr1, $xr0
+; LA64-NEXT: xvxor.v $xr0, $xr2, $xr0
+; LA64-NEXT: xvsrai.d $xr2, $xr1, 63
+; LA64-NEXT: xvbitrevi.d $xr2, $xr2, 63
+; LA64-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0
+; LA64-NEXT: ret
+ %ret = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
+ ret <4 x i64> %ret
+}
+
+declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64>, <4 x i64>)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/uadd-sat.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/uadd-sat.ll
new file mode 100644
index 0000000000000..489aac874167d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/uadd-sat.ll
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s
+
+define <32 x i8> @vuadd_b(<32 x i8> %a, <32 x i8> %b) {
+; CHECK-LABEL: vuadd_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvxori.b $xr2, $xr1, 255
+; CHECK-NEXT: xvmin.bu $xr0, $xr0, $xr2
+; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1
+; CHECK-NEXT: ret
+ %ret = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+ ret <32 x i8> %ret
+}
+
+define <16 x i16> @vuadd_h(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: vuadd_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvrepli.b $xr2, -1
+; CHECK-NEXT: xvxor.v $xr2, $xr1, $xr2
+; CHECK-NEXT: xvmin.hu $xr0, $xr0, $xr2
+; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1
+; CHECK-NEXT: ret
+ %ret = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+ ret <16 x i16> %ret
+}
+
+define <8 x i32> @vuadd_w(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: vuadd_w:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvrepli.b $xr2, -1
+; CHECK-NEXT: xvxor.v $xr2, $xr1, $xr2
+; CHECK-NEXT: xvmin.wu $xr0, $xr0, $xr2
+; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1
+; CHECK-NEXT: ret
+ %ret = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
+ ret <8 x i32> %ret
+}
+
+define <4 x i64> @vuadd_d(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: vuadd_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvrepli.b $xr2, -1
+; CHECK-NEXT: xvxor.v $xr2, $xr1, $xr2
+; CHECK-NEXT: xvmin.du $xr0, $xr0, $xr2
+; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT: ret
+ %ret = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
+ ret <4 x i64> %ret
+}
+
+declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64>, <4 x i64>)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/usub-sat.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/usub-sat.ll
new file mode 100644
index 0000000000000..3957424a138a9
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/usub-sat.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s
+
+define <32 x i8> @vusub_b(<32 x i8> %a, <32 x i8> %b) {
+; CHECK-LABEL: vusub_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.b $xr0, $xr0, $xr1
+; CHECK-NEXT: ret
+ %ret = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+ ret <32 x i8> %ret
+}
+
+define <16 x i16> @vusub_h(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: vusub_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmax.hu $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.h $xr0, $xr0, $xr1
+; CHECK-NEXT: ret
+ %ret = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+ ret <16 x i16> %ret
+}
+
+define <8 x i32> @vusub_w(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: vusub_w:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmax.wu $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.w $xr0, $xr0, $xr1
+; CHECK-NEXT: ret
+ %ret = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
+ ret <8 x i32> %ret
+}
+
+define <4 x i64> @vusub_d(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: vusub_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmax.du $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.d $xr0, $xr0, $xr1
+; CHECK-NEXT: ret
+ %ret = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
+ ret <4 x i64> %ret
+}
+
+declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.usub.sat.v4i64(<4 x i64>, <4 x i64>)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sadd-sat.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sadd-sat.ll
new file mode 100644
index 0000000000000..dd5842fcbec65
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sadd-sat.ll
@@ -0,0 +1,81 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA32
+; RUN: llc -mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA64
+
+define <16 x i8> @vsadd_b(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vsadd_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vadd.b $vr2, $vr0, $vr1
+; CHECK-NEXT: vslt.b $vr0, $vr2, $vr0
+; CHECK-NEXT: vslti.b $vr1, $vr1, 0
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vsrai.b $vr1, $vr2, 7
+; CHECK-NEXT: vbitrevi.b $vr1, $vr1, 7
+; CHECK-NEXT: vbitsel.v $vr0, $vr2, $vr1, $vr0
+; CHECK-NEXT: ret
+ %ret = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %ret
+}
+
+define <8 x i16> @vsadd_h(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vsadd_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vadd.h $vr2, $vr0, $vr1
+; CHECK-NEXT: vslt.h $vr0, $vr2, $vr0
+; CHECK-NEXT: vslti.h $vr1, $vr1, 0
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vsrai.h $vr1, $vr2, 15
+; CHECK-NEXT: vbitrevi.h $vr1, $vr1, 15
+; CHECK-NEXT: vbitsel.v $vr0, $vr2, $vr1, $vr0
+; CHECK-NEXT: ret
+ %ret = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i16> %ret
+}
+
+define <4 x i32> @vsadd_w(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vsadd_w:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vadd.w $vr2, $vr0, $vr1
+; CHECK-NEXT: vslt.w $vr0, $vr2, $vr0
+; CHECK-NEXT: vslti.w $vr1, $vr1, 0
+; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: vsrai.w $vr1, $vr2, 31
+; CHECK-NEXT: vbitrevi.w $vr1, $vr1, 31
+; CHECK-NEXT: vbitsel.v $vr0, $vr2, $vr1, $vr0
+; CHECK-NEXT: ret
+ %ret = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
+ ret <4 x i32> %ret
+}
+
+define <2 x i64> @vsadd_d(<2 x i64> %a, <2 x i64> %b) {
+; LA32-LABEL: vsadd_d:
+; LA32: # %bb.0:
+; LA32-NEXT: vadd.d $vr2, $vr0, $vr1
+; LA32-NEXT: vslt.d $vr0, $vr2, $vr0
+; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA32-NEXT: vld $vr3, $a0, %pc_lo12(.LCPI3_0)
+; LA32-NEXT: vslti.d $vr1, $vr1, 0
+; LA32-NEXT: vxor.v $vr0, $vr1, $vr0
+; LA32-NEXT: vsrai.d $vr1, $vr2, 63
+; LA32-NEXT: vxor.v $vr1, $vr1, $vr3
+; LA32-NEXT: vbitsel.v $vr0, $vr2, $vr1, $vr0
+; LA32-NEXT: ret
+;
+; LA64-LABEL: vsadd_d:
+; LA64: # %bb.0:
+; LA64-NEXT: vadd.d $vr2, $vr0, $vr1
+; LA64-NEXT: vslt.d $vr0, $vr2, $vr0
+; LA64-NEXT: vslti.d $vr1, $vr1, 0
+; LA64-NEXT: vxor.v $vr0, $vr1, $vr0
+; LA64-NEXT: vsrai.d $vr1, $vr2, 63
+; LA64-NEXT: vbitrevi.d $vr1, $vr1, 63
+; LA64-NEXT: vbitsel.v $vr0, $vr2, $vr1, $vr0
+; LA64-NEXT: ret
+ %ret = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i64> %ret
+}
+
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64>, <2 x i64>)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ssub-sat.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ssub-sat.ll
new file mode 100644
index 0000000000000..e330b7e43b42e
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ssub-sat.ll
@@ -0,0 +1,86 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA32
+; RUN: llc -mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA64
+
+define <16 x i8> @vssub_b(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vssub_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vrepli.b $vr2, 0
+; CHECK-NEXT: vslt.b $vr2, $vr2, $vr1
+; CHECK-NEXT: vsub.b $vr1, $vr0, $vr1
+; CHECK-NEXT: vslt.b $vr0, $vr1, $vr0
+; CHECK-NEXT: vxor.v $vr0, $vr2, $vr0
+; CHECK-NEXT: vsrai.b $vr2, $vr1, 7
+; CHECK-NEXT: vbitrevi.b $vr2, $vr2, 7
+; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0
+; CHECK-NEXT: ret
+ %ret = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %ret
+}
+
+define <8 x i16> @vssub_h(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vssub_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vrepli.b $vr2, 0
+; CHECK-NEXT: vslt.h $vr2, $vr2, $vr1
+; CHECK-NEXT: vsub.h $vr1, $vr0, $vr1
+; CHECK-NEXT: vslt.h $vr0, $vr1, $vr0
+; CHECK-NEXT: vxor.v $vr0, $vr2, $vr0
+; CHECK-NEXT: vsrai.h $vr2, $vr1, 15
+; CHECK-NEXT: vbitrevi.h $vr2, $vr2, 15
+; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0
+; CHECK-NEXT: ret
+ %ret = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i16> %ret
+}
+
+define <4 x i32> @vssub_w(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vssub_w:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vrepli.b $vr2, 0
+; CHECK-NEXT: vslt.w $vr2, $vr2, $vr1
+; CHECK-NEXT: vsub.w $vr1, $vr0, $vr1
+; CHECK-NEXT: vslt.w $vr0, $vr1, $vr0
+; CHECK-NEXT: vxor.v $vr0, $vr2, $vr0
+; CHECK-NEXT: vsrai.w $vr2, $vr1, 31
+; CHECK-NEXT: vbitrevi.w $vr2, $vr2, 31
+; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0
+; CHECK-NEXT: ret
+ %ret = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
+ ret <4 x i32> %ret
+}
+
+define <2 x i64> @vssub_d(<2 x i64> %a, <2 x i64> %b) {
+; LA32-LABEL: vssub_d:
+; LA32: # %bb.0:
+; LA32-NEXT: vrepli.b $vr2, 0
+; LA32-NEXT: vslt.d $vr2, $vr2, $vr1
+; LA32-NEXT: vsub.d $vr1, $vr0, $vr1
+; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA32-NEXT: vld $vr3, $a0, %pc_lo12(.LCPI3_0)
+; LA32-NEXT: vslt.d $vr0, $vr1, $vr0
+; LA32-NEXT: vxor.v $vr0, $vr2, $vr0
+; LA32-NEXT: vsrai.d $vr2, $vr1, 63
+; LA32-NEXT: vxor.v $vr2, $vr2, $vr3
+; LA32-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0
+; LA32-NEXT: ret
+;
+; LA64-LABEL: vssub_d:
+; LA64: # %bb.0:
+; LA64-NEXT: vrepli.b $vr2, 0
+; LA64-NEXT: vslt.d $vr2, $vr2, $vr1
+; LA64-NEXT: vsub.d $vr1, $vr0, $vr1
+; LA64-NEXT: vslt.d $vr0, $vr1, $vr0
+; LA64-NEXT: vxor.v $vr0, $vr2, $vr0
+; LA64-NEXT: vsrai.d $vr2, $vr1, 63
+; LA64-NEXT: vbitrevi.d $vr2, $vr2, 63
+; LA64-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0
+; LA64-NEXT: ret
+ %ret = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i64> %ret
+}
+
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64>, <2 x i64>)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/uadd-sat.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/uadd-sat.ll
new file mode 100644
index 0000000000000..1e8aa52451d47
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/uadd-sat.ll
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc -mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s
+
+define <16 x i8> @vuadd_b(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vuadd_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vxori.b $vr2, $vr1, 255
+; CHECK-NEXT: vmin.bu $vr0, $vr0, $vr2
+; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1
+; CHECK-NEXT: ret
+ %ret = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %ret
+}
+
+define <8 x i16> @vuadd_h(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vuadd_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vrepli.b $vr2, -1
+; CHECK-NEXT: vxor.v $vr2, $vr1, $vr2
+; CHECK-NEXT: vmin.hu $vr0, $vr0, $vr2
+; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1
+; CHECK-NEXT: ret
+ %ret = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i16> %ret
+}
+
+define <4 x i32> @vuadd_w(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vuadd_w:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vrepli.b $vr2, -1
+; CHECK-NEXT: vxor.v $vr2, $vr1, $vr2
+; CHECK-NEXT: vmin.wu $vr0, $vr0, $vr2
+; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1
+; CHECK-NEXT: ret
+ %ret = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
+ ret <4 x i32> %ret
+}
+
+define <2 x i64> @vuadd_d(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vuadd_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vrepli.b $vr2, -1
+; CHECK-NEXT: vxor.v $vr2, $vr1, $vr2
+; CHECK-NEXT: vmin.du $vr0, $vr0, $vr2
+; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT: ret
+ %ret = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i64> %ret
+}
+
+declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/usub-sat.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/usub-sat.ll
new file mode 100644
index 0000000000000..3ee1b2b8996d7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/usub-sat.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc -mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s
+
+define <16 x i8> @vusub_b(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vusub_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.b $vr0, $vr0, $vr1
+; CHECK-NEXT: ret
+ %ret = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %ret
+}
+
+define <8 x i16> @vusub_h(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vusub_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.h $vr0, $vr0, $vr1
+; CHECK-NEXT: ret
+ %ret = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i16> %ret
+}
+
+define <4 x i32> @vusub_w(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vusub_w:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.w $vr0, $vr0, $vr1
+; CHECK-NEXT: ret
+ %ret = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
+ ret <4 x i32> %ret
+}
+
+define <2 x i64> @vusub_d(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vusub_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmax.du $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.d $vr0, $vr0, $vr1
+; CHECK-NEXT: ret
+ %ret = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i64> %ret
+}
+
+declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.usub.sat.v2i64(<2 x i64>, <2 x i64>)