[llvm] e618a79 - [RISCV][test] Add (add x, C) -> (sub x, -C) multi-use and vector tests

Piotr Fusik via llvm-commits llvm-commits at lists.llvm.org
Mon May 12 21:25:57 PDT 2025


Author: Piotr Fusik
Date: 2025-05-13T06:25:05+02:00
New Revision: e618a79711a7aefa442f8a417aceeac2ceadbcdd

URL: https://github.com/llvm/llvm-project/commit/e618a79711a7aefa442f8a417aceeac2ceadbcdd
DIFF: https://github.com/llvm/llvm-project/commit/e618a79711a7aefa442f8a417aceeac2ceadbcdd.diff

LOG: [RISCV][test] Add (add x, C) -> (sub x, -C) multi-use and vector tests
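
These cover a combine that rewrites an add of a negative 64-bit immediate
into a sub of its negation when the negated constant is cheaper to
materialize. A minimal scalar sketch of the idea, using the same constant
-1099511627775 = -(2^40 - 1) as the tests; the function name is illustrative
and the two-instruction sequence for the negated constant is an assumed
materialization, not a check line from this commit:

    ; illustrative function, not part of this diff
    define i64 @add_to_sub(i64 %x) {
      %r = add i64 %x, -1099511627775  ; same value as: sub i64 %x, 1099511627775
      ret i64 %r
    }

    ; Materializing C = -(2^40 - 1) on RV64 takes three instructions
    ; (as in the check lines below):
    ;   li   a1, -1
    ;   slli a1, a1, 40          ; a1 = -2^40
    ;   addi a1, a1, 1           ; a1 = -(2^40 - 1)
    ;   add  a0, a0, a1
    ; Materializing -C = 2^40 - 1 would take two, so the sub form is shorter:
    ;   li   a1, -1
    ;   srli a1, a1, 24          ; a1 = 0xff_ffff_ffff = 2^40 - 1
    ;   sub  a0, a0, a1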

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll b/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll
index ddcf4e1a8aa77..8c251c0fe9a9e 100644
--- a/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll
+++ b/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll
@@ -56,6 +56,22 @@ define i64 @add_multiuse(i64 %x) {
 ; CHECK-NEXT:    and a0, a0, a1
 ; CHECK-NEXT:    ret
   %add = add i64 %x, -1099511627775
-  %xor = and i64 %add, -1099511627775
+  %and = and i64 %add, -1099511627775
+  ret i64 %and
+}
+
+define i64 @add_multiuse_const(i64 %x, i64 %y) {
+; CHECK-LABEL: add_multiuse_const:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, -1
+; CHECK-NEXT:    slli a2, a2, 40
+; CHECK-NEXT:    addi a2, a2, 1
+; CHECK-NEXT:    add a0, a0, a2
+; CHECK-NEXT:    add a1, a1, a2
+; CHECK-NEXT:    xor a0, a0, a1
+; CHECK-NEXT:    ret
+  %a = add i64 %x, -1099511627775
+  %b = add i64 %y, -1099511627775
+  %xor = xor i64 %a, %b
   ret i64 %xor
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
index b6490e0155254..1c62e0fab977b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
@@ -431,3 +431,61 @@ define void @vadd_vx_v16i64(ptr %a, i64 %b, ptr %c) {
   store <16 x i64> %vc, ptr %c
   ret void
 }
+
+define <2 x i64> @vadd_vx_v2i64_to_sub(<2 x i64> %va) {
+; RV32-LABEL: vadd_vx_v2i64_to_sub:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    li a0, -256
+; RV32-NEXT:    li a1, 1
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vadd.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vadd_vx_v2i64_to_sub:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, -1
+; RV64-NEXT:    slli a0, a0, 40
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vadd.vx v8, v8, a0
+; RV64-NEXT:    ret
+  %v = add <2 x i64> splat (i64 -1099511627775), %va
+  ret <2 x i64> %v
+}
+
+define <2 x i64> @vadd_vx_v2i64_to_sub_swapped(<2 x i64> %va) {
+; RV32-LABEL: vadd_vx_v2i64_to_sub_swapped:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    li a0, -256
+; RV32-NEXT:    li a1, 1
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vadd.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vadd_vx_v2i64_to_sub_swapped:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, -1
+; RV64-NEXT:    slli a0, a0, 40
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vadd.vx v8, v8, a0
+; RV64-NEXT:    ret
+  %v = add <2 x i64> %va, splat (i64 -1099511627775)
+  ret <2 x i64> %v
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index 7ee8179acfdb9..1151123f6a18b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -1425,3 +1425,59 @@ define <32 x i64> @vadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
   %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
   ret <32 x i64> %v
 }
+
+define <2 x i64> @vadd_vx_v2i64_to_sub(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) nounwind {
+; RV32-LABEL: vadd_vx_v2i64_to_sub:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    li a1, -256
+; RV32-NEXT:    li a2, 1
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vadd_vx_v2i64_to_sub:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, -1
+; RV64-NEXT:    slli a1, a1, 40
+; RV64-NEXT:    addi a1, a1, 1
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT:    vadd.vx v8, v8, a1, v0.t
+; RV64-NEXT:    ret
+  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> splat (i64 -1099511627775), <2 x i64> %va, <2 x i1> %m, i32 %evl)
+  ret <2 x i64> %v
+}
+
+define <2 x i64> @vadd_vx_v2i64_to_sub_swapped(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) nounwind {
+; RV32-LABEL: vadd_vx_v2i64_to_sub_swapped:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    li a1, -256
+; RV32-NEXT:    li a2, 1
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vadd_vx_v2i64_to_sub_swapped:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, -1
+; RV64-NEXT:    slli a1, a1, 40
+; RV64-NEXT:    addi a1, a1, 1
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT:    vadd.vx v8, v8, a1, v0.t
+; RV64-NEXT:    ret
+  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1099511627775), <2 x i1> %m, i32 %evl)
+  ret <2 x i64> %v
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
index ac22e11d30cdc..a95ad7f744af3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
@@ -865,3 +865,57 @@ define <vscale x 8 x i32> @vadd_vv_mask_negative1_nxv8i32(<vscale x 8 x i32> %va
   %vd = add <vscale x 8 x i32> %vc, %vs
   ret <vscale x 8 x i32> %vd
 }
+
+define <vscale x 1 x i64> @vadd_vx_imm64_to_sub(<vscale x 1 x i64> %va) nounwind {
+; RV32-LABEL: vadd_vx_imm64_to_sub:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    li a0, -256
+; RV32-NEXT:    li a1, 1
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vadd.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vadd_vx_imm64_to_sub:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, -1
+; RV64-NEXT:    slli a0, a0, 40
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT:    vadd.vx v8, v8, a0
+; RV64-NEXT:    ret
+  %vc = add <vscale x 1 x i64> splat (i64 -1099511627775), %va
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vadd_vx_imm64_to_sub_swapped(<vscale x 1 x i64> %va) nounwind {
+; RV32-LABEL: vadd_vx_imm64_to_sub_swapped:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    li a0, -256
+; RV32-NEXT:    li a1, 1
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vadd.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vadd_vx_imm64_to_sub_swapped:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, -1
+; RV64-NEXT:    slli a0, a0, 40
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT:    vadd.vx v8, v8, a0
+; RV64-NEXT:    ret
+  %vc = add <vscale x 1 x i64> %va, splat (i64 -1099511627775)
+  ret <vscale x 1 x i64> %vc
+}
