[llvm] ce6e66d - [RISCV] Add coverage for an upcoming select lowering change

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 17 12:16:45 PST 2025


Author: Philip Reames
Date: 2025-01-17T12:16:16-08:00
New Revision: ce6e66ddecbd1ddfa3be9be2ac881931d5ae71a4

URL: https://github.com/llvm/llvm-project/commit/ce6e66ddecbd1ddfa3be9be2ac881931d5ae71a4
DIFF: https://github.com/llvm/llvm-project/commit/ce6e66ddecbd1ddfa3be9be2ac881931d5ae71a4.diff

LOG: [RISCV] Add coverage for an upcoming select lowering change

A select between an add and a sub of the same operands can be lowered
either as a masked vrsub followed by an add (reducing register
pressure) or as a vmacc.  The former will be implemented in an
upcoming review.
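
For context, this works because select(c, a - b, a + b) equals
a + select(c, -b, b): the conditional negation of b can be done in
place with a masked vrsub, followed by a single unmasked vadd.  A
minimal sketch of that sequence (register assignments are
illustrative, not taken from this commit, and the vsetvli with a
mask-undisturbed policy is omitted):

    # v0 = c (mask), v8 = a, v9 = b
    vrsub.vi v9, v9, 0, v0.t    # v9 = -b where c is set, b elsewhere
    vadd.vv  v8, v8, v9         # v8 = a + select(c, -b, b)

The vmacc form would instead multiply b by a +/-1 vector and
accumulate into a, at the cost of materializing that +/-1 vector.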

Added: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-addsub.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-addsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-addsub.ll
new file mode 100644
index 00000000000000..008c39df708394
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-addsub.ll
@@ -0,0 +1,351 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+
+define <1 x i32> @select_addsub_v1i32(<1 x i1> %cc, <1 x i32> %a, <1 x i32> %b) {
+; CHECK-LABEL: select_addsub_v1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
+; CHECK-NEXT:    vadd.vv v10, v8, v9
+; CHECK-NEXT:    vsub.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+  %sub = sub nsw <1 x i32> %a, %b
+  %add = add nsw <1 x i32> %a, %b
+  %res = select <1 x i1> %cc, <1 x i32> %sub, <1 x i32> %add
+  ret <1 x i32> %res
+}
+
+define <2 x i32> @select_addsub_v2i32(<2 x i1> %cc, <2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: select_addsub_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vadd.vv v10, v8, v9
+; CHECK-NEXT:    vsub.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+  %sub = sub nsw <2 x i32> %a, %b
+  %add = add nsw <2 x i32> %a, %b
+  %res = select <2 x i1> %cc, <2 x i32> %sub, <2 x i32> %add
+  ret <2 x i32> %res
+}
+
+define <4 x i32> @select_addsub_v4i32(<4 x i1> %cc, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: select_addsub_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v10, v8, v9
+; CHECK-NEXT:    vsub.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %sub = sub nsw <4 x i32> %a, %b
+  %add = add nsw <4 x i32> %a, %b
+  %res = select <4 x i1> %cc, <4 x i32> %sub, <4 x i32> %add
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @select_addsub_v4i32_select_swapped(<4 x i1> %cc, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: select_addsub_v4i32_select_swapped:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vsub.vv v10, v8, v9
+; CHECK-NEXT:    vadd.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %sub = sub nsw <4 x i32> %a, %b
+  %add = add nsw <4 x i32> %a, %b
+  %res = select <4 x i1> %cc, <4 x i32> %add, <4 x i32> %sub
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @select_addsub_v4i32_add_swapped(<4 x i1> %cc, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: select_addsub_v4i32_add_swapped:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v10, v9, v8
+; CHECK-NEXT:    vsub.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %sub = sub nsw <4 x i32> %a, %b
+  %add = add nsw <4 x i32> %b, %a
+  %res = select <4 x i1> %cc, <4 x i32> %sub, <4 x i32> %add
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @select_addsub_v4i32_both_swapped(<4 x i1> %cc, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: select_addsub_v4i32_both_swapped:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vsub.vv v10, v8, v9
+; CHECK-NEXT:    vadd.vv v10, v9, v8, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %sub = sub nsw <4 x i32> %a, %b
+  %add = add nsw <4 x i32> %b, %a
+  %res = select <4 x i1> %cc, <4 x i32> %add, <4 x i32> %sub
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @select_addsub_v4i32_sub_swapped(<4 x i1> %cc, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: select_addsub_v4i32_sub_swapped:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v10, v9, v8
+; CHECK-NEXT:    vsub.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %sub = sub nsw <4 x i32> %a, %b
+  %add = add nsw <4 x i32> %b, %a
+  %res = select <4 x i1> %cc, <4 x i32> %sub, <4 x i32> %add
+  ret <4 x i32> %res
+}
+
+define <8 x i32> @select_addsub_v8i32(<8 x i1> %cc, <8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: select_addsub_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT:    vadd.vv v12, v8, v10
+; CHECK-NEXT:    vsub.vv v12, v8, v10, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+  %sub = sub nsw <8 x i32> %a, %b
+  %add = add nsw <8 x i32> %a, %b
+  %res = select <8 x i1> %cc, <8 x i32> %sub, <8 x i32> %add
+  ret <8 x i32> %res
+}
+
+define <16 x i32> @select_addsub_v16i32(<16 x i1> %cc, <16 x i32> %a, <16 x i32> %b) {
+; CHECK-LABEL: select_addsub_v16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT:    vadd.vv v16, v8, v12
+; CHECK-NEXT:    vsub.vv v16, v8, v12, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+  %sub = sub nsw <16 x i32> %a, %b
+  %add = add nsw <16 x i32> %a, %b
+  %res = select <16 x i1> %cc, <16 x i32> %sub, <16 x i32> %add
+  ret <16 x i32> %res
+}
+
+define <32 x i32> @select_addsub_v32i32(<32 x i1> %cc, <32 x i32> %a, <32 x i32> %b) {
+; CHECK-LABEL: select_addsub_v32i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vadd.vv v24, v8, v16
+; CHECK-NEXT:    vsub.vv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v24
+; CHECK-NEXT:    ret
+  %sub = sub nsw <32 x i32> %a, %b
+  %add = add nsw <32 x i32> %a, %b
+  %res = select <32 x i1> %cc, <32 x i32> %sub, <32 x i32> %add
+  ret <32 x i32> %res
+}
+
+define <64 x i32> @select_addsub_v64i32(<64 x i1> %cc, <64 x i32> %a, <64 x i32> %b) {
+; CHECK-LABEL: select_addsub_v64i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    add a1, a1, a2
+; CHECK-NEXT:    sub sp, sp, a1
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vle32.v v16, (a0)
+; CHECK-NEXT:    addi a0, a0, 128
+; CHECK-NEXT:    vle32.v v24, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vadd.vv v24, v8, v16
+; CHECK-NEXT:    vsub.vv v24, v8, v16, v0.t
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v0, v0, 4
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vadd.vv v16, v16, v8
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vsub.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    mv a1, a0
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
+; CHECK-NEXT:    ret
+  %sub = sub nsw <64 x i32> %a, %b
+  %add = add nsw <64 x i32> %a, %b
+  %res = select <64 x i1> %cc, <64 x i32> %sub, <64 x i32> %add
+  ret <64 x i32> %res
+}
+
+define <8 x i64> @select_addsub_v8i64(<8 x i1> %cc, <8 x i64> %a, <8 x i64> %b) {
+; CHECK-LABEL: select_addsub_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vadd.vv v16, v8, v12
+; CHECK-NEXT:    vsub.vv v16, v8, v12, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+  %sub = sub nsw <8 x i64> %a, %b
+  %add = add nsw <8 x i64> %a, %b
+  %res = select <8 x i1> %cc, <8 x i64> %sub, <8 x i64> %add
+  ret <8 x i64> %res
+}
+
+define <8 x i16> @select_addsub_v8i16(<8 x i1> %cc, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: select_addsub_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v10, v8, v9
+; CHECK-NEXT:    vsub.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %sub = sub nsw <8 x i16> %a, %b
+  %add = add nsw <8 x i16> %a, %b
+  %res = select <8 x i1> %cc, <8 x i16> %sub, <8 x i16> %add
+  ret <8 x i16> %res
+}
+
+define <8 x i8> @select_addsub_v8i8(<8 x i1> %cc, <8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: select_addsub_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vv v10, v8, v9
+; CHECK-NEXT:    vsub.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+  %sub = sub nsw <8 x i8> %a, %b
+  %add = add nsw <8 x i8> %a, %b
+  %res = select <8 x i1> %cc, <8 x i8> %sub, <8 x i8> %add
+  ret <8 x i8> %res
+}
+
+define <8 x i1> @select_addsub_v8i1(<8 x i1> %cc, <8 x i1> %a, <8 x i1> %b) {
+; CHECK-LABEL: select_addsub_v8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmxor.mm v0, v8, v9
+; CHECK-NEXT:    ret
+  %sub = sub nsw <8 x i1> %a, %b
+  %add = add nsw <8 x i1> %a, %b
+  %res = select <8 x i1> %cc, <8 x i1> %sub, <8 x i1> %add
+  ret <8 x i1> %res
+}
+
+define <8 x i2> @select_addsub_v8i2(<8 x i1> %cc, <8 x i2> %a, <8 x i2> %b) {
+; CHECK-LABEL: select_addsub_v8i2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vv v10, v8, v9
+; CHECK-NEXT:    vsub.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+  %sub = sub nsw <8 x i2> %a, %b
+  %add = add nsw <8 x i2> %a, %b
+  %res = select <8 x i1> %cc, <8 x i2> %sub, <8 x i2> %add
+  ret <8 x i2> %res
+}
+
+define <4 x i32> @select_addsub_v4i32_constmask(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: select_addsub_v4i32_constmask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vmv.v.i v0, 5
+; CHECK-NEXT:    vadd.vv v10, v8, v9
+; CHECK-NEXT:    vsub.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %sub = sub nsw <4 x i32> %a, %b
+  %add = add nsw <4 x i32> %a, %b
+  %res = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> %sub, <4 x i32> %add
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @select_addsub_v4i32_constmask2(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: select_addsub_v4i32_constmask2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vmv.v.i v0, 5
+; CHECK-NEXT:    vadd.vv v10, v9, v8
+; CHECK-NEXT:    vsub.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %sub = sub nsw <4 x i32> %a, %b
+  %add = add nsw <4 x i32> %b, %a
+  %res = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> %sub, <4 x i32> %add
+  ret <4 x i32> %res
+}
+
+; Same pattern as above, but the select is disguised as a shuffle
+define <4 x i32> @select_addsub_v4i32_as_shuffle(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: select_addsub_v4i32_as_shuffle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vmv.v.i v0, 5
+; CHECK-NEXT:    vadd.vv v10, v8, v9
+; CHECK-NEXT:    vsub.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %sub = sub nsw <4 x i32> %a, %b
+  %add = add nsw <4 x i32> %a, %b
+  %res = shufflevector <4 x i32> %sub, <4 x i32> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  ret <4 x i32> %res
+}
+
+; Same pattern as above, but with the sub operands swapped
+define <4 x i32> @select_addsub_v4i32_as_shuffle2(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: select_addsub_v4i32_as_shuffle2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vmv.v.i v0, 5
+; CHECK-NEXT:    vadd.vv v10, v8, v9
+; CHECK-NEXT:    vsub.vv v10, v9, v8, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %sub = sub nsw <4 x i32> %b, %a
+  %add = add nsw <4 x i32> %a, %b
+  %res = shufflevector <4 x i32> %sub, <4 x i32> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  ret <4 x i32> %res
+}


        

