[llvm] 4166df2 - [RISCV][test] Add tests for vector subtraction if above threshold
Piotr Fusik via llvm-commits
llvm-commits at lists.llvm.org
Thu Jul 17 06:22:07 PDT 2025
Author: Piotr Fusik
Date: 2025-07-17T15:21:42+02:00
New Revision: 4166df2073b6b3e5c7ab0c25d9bc73980b50ea31
URL: https://github.com/llvm/llvm-project/commit/4166df2073b6b3e5c7ab0c25d9bc73980b50ea31
DIFF: https://github.com/llvm/llvm-project/commit/4166df2073b6b3e5c7ab0c25d9bc73980b50ea31.diff
LOG: [RISCV][test] Add tests for vector subtraction if above threshold
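The new tests cover two idioms: subtracting a vector operand only when the minuend is at least as large as it, and subtracting a constant only when a constant threshold is exceeded. A rough scalar sketch of both follows, assuming source loops that a vectorizer would lower to the icmp/select/sub IR checked below; the C form, function names, and element types are illustrative only and not part of the commit.

    #include <stddef.h>
    #include <stdint.h>

    /* Idiom 1: subtract vb[i] only when va[i] >= vb[i].
       In IR this is a select between 0 and vb feeding a `sub nuw`. */
    void sub_if_uge(uint8_t *va, const uint8_t *vb, size_t n) {
      for (size_t i = 0; i < n; i++)
        va[i] -= (va[i] >= vb[i]) ? vb[i] : 0;
    }

    /* Idiom 2: subtract a constant when above a constant threshold.
       In IR this is icmp ugt against C, an add of -(C+1), and a select. */
    void sub_if_uge_C(uint32_t *x, size_t n) {
      for (size_t i = 0; i < n; i++)
        if (x[i] > 65520u)
          x[i] -= 65521u;
    }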
Added:
Modified:
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 0c30cbe4a42ef..35b9457fbc1ff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -5707,3 +5707,217 @@ define void @msub_vv_v2i64_2(ptr %x, <2 x i64> %y) {
store <2 x i64> %c, ptr %x
ret void
}
+
+define <8 x i8> @vsub_if_uge_v8i8(<8 x i8> %va, <8 x i8> %vb) {
+; CHECK-LABEL: vsub_if_uge_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v9
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %cmp = icmp ult <8 x i8> %va, %vb
+ %select = select <8 x i1> %cmp, <8 x i8> zeroinitializer, <8 x i8> %vb
+ %sub = sub nuw <8 x i8> %va, %select
+ ret <8 x i8> %sub
+}
+
+define <8 x i8> @vsub_if_uge_swapped_v8i8(<8 x i8> %va, <8 x i8> %vb) {
+; CHECK-LABEL: vsub_if_uge_swapped_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %cmp = icmp uge <8 x i8> %va, %vb
+ %select = select <8 x i1> %cmp, <8 x i8> %vb, <8 x i8> zeroinitializer
+ %sub = sub nuw <8 x i8> %va, %select
+ ret <8 x i8> %sub
+}
+
+define <8 x i16> @vsub_if_uge_v8i16(<8 x i16> %va, <8 x i16> %vb) {
+; CHECK-LABEL: vsub_if_uge_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v9
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %cmp = icmp ult <8 x i16> %va, %vb
+ %select = select <8 x i1> %cmp, <8 x i16> zeroinitializer, <8 x i16> %vb
+ %sub = sub nuw <8 x i16> %va, %select
+ ret <8 x i16> %sub
+}
+
+define <8 x i16> @vsub_if_uge_swapped_v8i16(<8 x i16> %va, <8 x i16> %vb) {
+; CHECK-LABEL: vsub_if_uge_swapped_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %cmp = icmp uge <8 x i16> %va, %vb
+ %select = select <8 x i1> %cmp, <8 x i16> %vb, <8 x i16> zeroinitializer
+ %sub = sub nuw <8 x i16> %va, %select
+ ret <8 x i16> %sub
+}
+
+define <4 x i32> @vsub_if_uge_v4i32(<4 x i32> %va, <4 x i32> %vb) {
+; CHECK-LABEL: vsub_if_uge_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v9
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %cmp = icmp ult <4 x i32> %va, %vb
+ %select = select <4 x i1> %cmp, <4 x i32> zeroinitializer, <4 x i32> %vb
+ %sub = sub nuw <4 x i32> %va, %select
+ ret <4 x i32> %sub
+}
+
+define <4 x i32> @vsub_if_uge_swapped_v4i32(<4 x i32> %va, <4 x i32> %vb) {
+; CHECK-LABEL: vsub_if_uge_swapped_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %cmp = icmp uge <4 x i32> %va, %vb
+ %select = select <4 x i1> %cmp, <4 x i32> %vb, <4 x i32> zeroinitializer
+ %sub = sub nuw <4 x i32> %va, %select
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @vsub_if_uge_v2i64(<2 x i64> %va, <2 x i64> %vb) {
+; CHECK-LABEL: vsub_if_uge_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v9
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %cmp = icmp ult <2 x i64> %va, %vb
+ %select = select <2 x i1> %cmp, <2 x i64> zeroinitializer, <2 x i64> %vb
+ %sub = sub nuw <2 x i64> %va, %select
+ ret <2 x i64> %sub
+}
+
+define <2 x i64> @vsub_if_uge_swapped_v2i64(<2 x i64> %va, <2 x i64> %vb) {
+; CHECK-LABEL: vsub_if_uge_swapped_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %cmp = icmp uge <2 x i64> %va, %vb
+ %select = select <2 x i1> %cmp, <2 x i64> %vb, <2 x i64> zeroinitializer
+ %sub = sub nuw <2 x i64> %va, %select
+ ret <2 x i64> %sub
+}
+
+define <8 x i8> @sub_if_uge_C_v8i8(<8 x i8> %x) {
+; CHECK-LABEL: sub_if_uge_C_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vmsgtu.vi v0, v8, 12
+; CHECK-NEXT: vadd.vi v8, v8, -13, v0.t
+; CHECK-NEXT: ret
+ %cmp = icmp ugt <8 x i8> %x, splat (i8 12)
+ %sub = add <8 x i8> %x, splat (i8 -13)
+ %select = select <8 x i1> %cmp, <8 x i8> %sub, <8 x i8> %x
+ ret <8 x i8> %select
+}
+
+define <8 x i16> @sub_if_uge_C_v8i16(<8 x i16> %x) {
+; CHECK-LABEL: sub_if_uge_C_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 2000
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vmsgtu.vx v0, v8, a0
+; CHECK-NEXT: li a0, -2001
+; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %cmp = icmp ugt <8 x i16> %x, splat (i16 2000)
+ %sub = add <8 x i16> %x, splat (i16 -2001)
+ %select = select <8 x i1> %cmp, <8 x i16> %sub, <8 x i16> %x
+ ret <8 x i16> %select
+}
+
+define <4 x i32> @sub_if_uge_C_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: sub_if_uge_C_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 16
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vmsgtu.vx v0, v8, a0
+; CHECK-NEXT: lui a0, 1048560
+; CHECK-NEXT: addi a0, a0, 15
+; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %cmp = icmp ugt <4 x i32> %x, splat (i32 65520)
+ %sub = add <4 x i32> %x, splat (i32 -65521)
+ %select = select <4 x i1> %cmp, <4 x i32> %sub, <4 x i32> %x
+ ret <4 x i32> %select
+}
+
+define <4 x i32> @sub_if_uge_C_swapped_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: sub_if_uge_C_swapped_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 16
+; CHECK-NEXT: addi a0, a0, -15
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: lui a0, 1048560
+; CHECK-NEXT: addi a0, a0, 15
+; CHECK-NEXT: vadd.vx v9, v8, a0
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %cmp = icmp ult <4 x i32> %x, splat (i32 65521)
+ %sub = add <4 x i32> %x, splat (i32 -65521)
+ %select = select <4 x i1> %cmp, <4 x i32> %x, <4 x i32> %sub
+ ret <4 x i32> %select
+}
+
+define <2 x i64> @sub_if_uge_C_v2i64(<2 x i64> %x) nounwind {
+; RV32-LABEL: sub_if_uge_C_v2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: li a0, 1
+; RV32-NEXT: lui a1, 172127
+; RV32-NEXT: mv a2, sp
+; RV32-NEXT: addi a1, a1, 512
+; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a0, 4(sp)
+; RV32-NEXT: li a0, -2
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v9, (a2), zero
+; RV32-NEXT: lui a1, 876449
+; RV32-NEXT: addi a1, a1, -513
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a0, 12(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmsltu.vv v0, v9, v8
+; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: sub_if_uge_C_v2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, 2384
+; RV64-NEXT: addi a0, a0, 761
+; RV64-NEXT: slli a0, a0, 9
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT: vmsgtu.vx v0, v8, a0
+; RV64-NEXT: lui a0, 1048278
+; RV64-NEXT: addi a0, a0, -95
+; RV64-NEXT: slli a0, a0, 12
+; RV64-NEXT: addi a0, a0, -513
+; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %cmp = icmp ugt <2 x i64> %x, splat (i64 5000000000)
+ %sub = add <2 x i64> %x, splat (i64 -5000000001)
+ %select = select <2 x i1> %cmp, <2 x i64> %sub, <2 x i64> %x
+ ret <2 x i64> %select
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll
index e3b2d6c1efe1f..a21a526e00ec8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll
@@ -893,3 +893,217 @@ define <vscale x 8 x i32> @vmin_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %vs
ret <vscale x 8 x i32> %vc
}
+
+define <vscale x 2 x i8> @vsub_if_uge_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
+; CHECK-LABEL: vsub_if_uge_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v9
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %cmp = icmp ult <vscale x 2 x i8> %va, %vb
+ %select = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8> %vb
+ %sub = sub nuw <vscale x 2 x i8> %va, %select
+ ret <vscale x 2 x i8> %sub
+}
+
+define <vscale x 2 x i8> @vsub_if_uge_swapped_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
+; CHECK-LABEL: vsub_if_uge_swapped_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %cmp = icmp uge <vscale x 2 x i8> %va, %vb
+ %select = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %vb, <vscale x 2 x i8> zeroinitializer
+ %sub = sub nuw <vscale x 2 x i8> %va, %select
+ ret <vscale x 2 x i8> %sub
+}
+
+define <vscale x 2 x i16> @vsub_if_uge_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
+; CHECK-LABEL: vsub_if_uge_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v9
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %cmp = icmp ult <vscale x 2 x i16> %va, %vb
+ %select = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> zeroinitializer, <vscale x 2 x i16> %vb
+ %sub = sub nuw <vscale x 2 x i16> %va, %select
+ ret <vscale x 2 x i16> %sub
+}
+
+define <vscale x 2 x i16> @vsub_if_uge_swapped_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
+; CHECK-LABEL: vsub_if_uge_swapped_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
+; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %cmp = icmp uge <vscale x 2 x i16> %va, %vb
+ %select = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %vb, <vscale x 2 x i16> zeroinitializer
+ %sub = sub nuw <vscale x 2 x i16> %va, %select
+ ret <vscale x 2 x i16> %sub
+}
+
+define <vscale x 2 x i32> @vsub_if_uge_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
+; CHECK-LABEL: vsub_if_uge_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v9
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %cmp = icmp ult <vscale x 2 x i32> %va, %vb
+ %select = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> zeroinitializer, <vscale x 2 x i32> %vb
+ %sub = sub nuw <vscale x 2 x i32> %va, %select
+ ret <vscale x 2 x i32> %sub
+}
+
+define <vscale x 2 x i32> @vsub_if_uge_swapped_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
+; CHECK-LABEL: vsub_if_uge_swapped_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %cmp = icmp uge <vscale x 2 x i32> %va, %vb
+ %select = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %vb, <vscale x 2 x i32> zeroinitializer
+ %sub = sub nuw <vscale x 2 x i32> %va, %select
+ ret <vscale x 2 x i32> %sub
+}
+
+define <vscale x 2 x i64> @vsub_if_uge_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
+; CHECK-LABEL: vsub_if_uge_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vmsltu.vv v0, v8, v10
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT: ret
+ %cmp = icmp ult <vscale x 2 x i64> %va, %vb
+ %select = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> %vb
+ %sub = sub nuw <vscale x 2 x i64> %va, %select
+ ret <vscale x 2 x i64> %sub
+}
+
+define <vscale x 2 x i64> @vsub_if_uge_swapped_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
+; CHECK-LABEL: vsub_if_uge_swapped_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
+; CHECK-NEXT: vmsleu.vv v0, v10, v8
+; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %cmp = icmp uge <vscale x 2 x i64> %va, %vb
+ %select = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %vb, <vscale x 2 x i64> zeroinitializer
+ %sub = sub nuw <vscale x 2 x i64> %va, %select
+ ret <vscale x 2 x i64> %sub
+}
+
+define <vscale x 2 x i8> @sub_if_uge_C_nxv2i8(<vscale x 2 x i8> %x) {
+; CHECK-LABEL: sub_if_uge_C_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vmsgtu.vi v0, v8, 12
+; CHECK-NEXT: vadd.vi v8, v8, -13, v0.t
+; CHECK-NEXT: ret
+ %cmp = icmp ugt <vscale x 2 x i8> %x, splat (i8 12)
+ %sub = add <vscale x 2 x i8> %x, splat (i8 -13)
+ %select = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %sub, <vscale x 2 x i8> %x
+ ret <vscale x 2 x i8> %select
+}
+
+define <vscale x 2 x i16> @sub_if_uge_C_nxv2i16(<vscale x 2 x i16> %x) {
+; CHECK-LABEL: sub_if_uge_C_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 2000
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
+; CHECK-NEXT: vmsgtu.vx v0, v8, a0
+; CHECK-NEXT: li a0, -2001
+; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %cmp = icmp ugt <vscale x 2 x i16> %x, splat (i16 2000)
+ %sub = add <vscale x 2 x i16> %x, splat (i16 -2001)
+ %select = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %sub, <vscale x 2 x i16> %x
+ ret <vscale x 2 x i16> %select
+}
+
+define <vscale x 2 x i32> @sub_if_uge_C_nxv2i32(<vscale x 2 x i32> %x) {
+; CHECK-LABEL: sub_if_uge_C_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 16
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vmsgtu.vx v0, v8, a0
+; CHECK-NEXT: lui a0, 1048560
+; CHECK-NEXT: addi a0, a0, 15
+; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %cmp = icmp ugt <vscale x 2 x i32> %x, splat (i32 65520)
+ %sub = add <vscale x 2 x i32> %x, splat (i32 -65521)
+ %select = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %sub, <vscale x 2 x i32> %x
+ ret <vscale x 2 x i32> %select
+}
+
+define <vscale x 2 x i32> @sub_if_uge_C_swapped_nxv2i32(<vscale x 2 x i32> %x) {
+; CHECK-LABEL: sub_if_uge_C_swapped_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 16
+; CHECK-NEXT: addi a0, a0, -15
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: lui a0, 1048560
+; CHECK-NEXT: addi a0, a0, 15
+; CHECK-NEXT: vadd.vx v9, v8, a0
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %cmp = icmp ult <vscale x 2 x i32> %x, splat (i32 65521)
+ %sub = add <vscale x 2 x i32> %x, splat (i32 -65521)
+ %select = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %x, <vscale x 2 x i32> %sub
+ ret <vscale x 2 x i32> %select
+}
+
+define <vscale x 2 x i64> @sub_if_uge_C_nxv2i64(<vscale x 2 x i64> %x) nounwind {
+; RV32-LABEL: sub_if_uge_C_nxv2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: li a0, 1
+; RV32-NEXT: lui a1, 172127
+; RV32-NEXT: mv a2, sp
+; RV32-NEXT: addi a1, a1, 512
+; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a0, 4(sp)
+; RV32-NEXT: li a0, -2
+; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v10, (a2), zero
+; RV32-NEXT: lui a1, 876449
+; RV32-NEXT: addi a1, a1, -513
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a0, 12(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmsltu.vv v0, v10, v8
+; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: sub_if_uge_C_nxv2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, 2384
+; RV64-NEXT: addi a0, a0, 761
+; RV64-NEXT: slli a0, a0, 9
+; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; RV64-NEXT: vmsgtu.vx v0, v8, a0
+; RV64-NEXT: lui a0, 1048278
+; RV64-NEXT: addi a0, a0, -95
+; RV64-NEXT: slli a0, a0, 12
+; RV64-NEXT: addi a0, a0, -513
+; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %cmp = icmp ugt <vscale x 2 x i64> %x, splat (i64 5000000000)
+ %sub = add <vscale x 2 x i64> %x, splat (i64 -5000000001)
+ %select = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %sub, <vscale x 2 x i64> %x
+ ret <vscale x 2 x i64> %select
+}