[llvm] 99ed6ce - [SelectionDAG][RISCV] Add tests showing missed scalable-splat optimizations

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 22 23:07:16 PDT 2021


Author: Fraser Cormack
Date: 2021-07-23T06:58:16+01:00
New Revision: 99ed6ce2bdf062db9277a309b7c307fd22fadf9d

URL: https://github.com/llvm/llvm-project/commit/99ed6ce2bdf062db9277a309b7c307fd22fadf9d
DIFF: https://github.com/llvm/llvm-project/commit/99ed6ce2bdf062db9277a309b7c307fd22fadf9d.diff

LOG: [SelectionDAG][RISCV] Add tests showing missed scalable-splat optimizations

These tests show missed opportunities in the SelectionDAG layer when
dealing with scalable-vector splats. All of these are handled for the
equivalent `ISD::BUILD_VECTOR` code, and the tests have largely been
translated from the equivalent X86 tests.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D106574

Added: 
    llvm/test/CodeGen/RISCV/rvv/combine-splats.ll
    llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll b/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll
new file mode 100644
index 000000000000..0fe2dd4a2ba8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll
@@ -0,0 +1,116 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-v < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+experimental-v < %s | FileCheck %s
+
+; fold (and (or x, C), D) -> D if (C & D) == D
+
+; Missed today for scalable splats: since 255 & 8 == 8, the whole expression
+; should constant-fold to a splat of 8, but codegen still emits both the
+; vor.vx with 255 and the vand.vi with 8 (the BUILD_VECTOR path handles this).
+define <vscale x 4 x i32> @and_or_nxv4i32(<vscale x 4 x i32> %A) {
+; CHECK-LABEL: and_or_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 255
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vor.vx v26, v8, a0
+; CHECK-NEXT:    vand.vi v8, v26, 8
+; CHECK-NEXT:    ret
+  %ins1 = insertelement <vscale x 4 x i32> poison, i32 255, i32 0
+  %splat1 = shufflevector <vscale x 4 x i32> %ins1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %ins2 = insertelement <vscale x 4 x i32> poison, i32 8, i32 0
+  %splat2 = shufflevector <vscale x 4 x i32> %ins2, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %v1 = or <vscale x 4 x i32> %A, %splat1
+  %v2 = and <vscale x 4 x i32> %v1, %splat2
+  ret <vscale x 4 x i32> %v2
+}
+
+; (or (and X, c1), c2) -> (and (or X, c2), c1|c2) iff (c1 & c2) != 0
+
+; Missed today for scalable splats: with c1=7, c2=3 and (c1 & c2) != 0, the
+; or/and should be reassociated per the fold above; codegen currently keeps
+; the original vand.vi 7 / vor.vi 3 order unchanged.
+define <vscale x 2 x i64> @or_and_nxv2i64(<vscale x 2 x i64> %a0) {
+; CHECK-LABEL: or_and_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; CHECK-NEXT:    vand.vi v26, v8, 7
+; CHECK-NEXT:    vor.vi v8, v26, 3
+; CHECK-NEXT:    ret
+  %ins1 = insertelement <vscale x 2 x i64> poison, i64 7, i32 0
+  %splat1 = shufflevector <vscale x 2 x i64> %ins1, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %ins2 = insertelement <vscale x 2 x i64> poison, i64 3, i32 0
+  %splat2 = shufflevector <vscale x 2 x i64> %ins2, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %v1 = and <vscale x 2 x i64> %a0, %splat1
+  %v2 = or <vscale x 2 x i64> %v1, %splat2
+  ret <vscale x 2 x i64> %v2
+}
+
+; If all masked bits are going to be set, that's a constant fold.
+
+; Missed today for scalable splats: (X & 1) | 3 sets every bit the mask can
+; keep, so the result should constant-fold to a splat of 3; codegen still
+; emits both the vand.vi 1 and the vor.vi 3.
+define <vscale x 2 x i64> @or_and_nxv2i64_fold(<vscale x 2 x i64> %a0) {
+; CHECK-LABEL: or_and_nxv2i64_fold:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; CHECK-NEXT:    vand.vi v26, v8, 1
+; CHECK-NEXT:    vor.vi v8, v26, 3
+; CHECK-NEXT:    ret
+  %ins1 = insertelement <vscale x 2 x i64> poison, i64 1, i32 0
+  %splat1 = shufflevector <vscale x 2 x i64> %ins1, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %ins2 = insertelement <vscale x 2 x i64> poison, i64 3, i32 0
+  %splat2 = shufflevector <vscale x 2 x i64> %ins2, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %v1 = and <vscale x 2 x i64> %a0, %splat1
+  %v2 = or <vscale x 2 x i64> %v1, %splat2
+  ret <vscale x 2 x i64> %v2
+}
+
+; fold (shl (shl x, c1), c2) -> (shl x, (add c1, c2))
+
+; Missed today for scalable splats: the two shifts should combine into a
+; single shl by 6; codegen currently emits both vsll.vi instructions.
+; NOTE(review): the original test shifted by %ins1/%ins2 (single-lane
+; insertelement vectors, remaining lanes poison), leaving both splats dead
+; and not exercising the splat shl-of-shl combine at all. Fixed to shift by
+; %splat1/%splat2. CHECK lines below were updated by analogy with
+; combine_vec_ashr_ashr — re-run update_llc_test_checks.py to confirm.
+define <vscale x 4 x i32> @combine_vec_shl_shl(<vscale x 4 x i32> %x) {
+; CHECK-LABEL: combine_vec_shl_shl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vsll.vi v26, v8, 2
+; CHECK-NEXT:    vsll.vi v8, v26, 4
+; CHECK-NEXT:    ret
+  %ins1 = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
+  %splat1 = shufflevector <vscale x 4 x i32> %ins1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %ins2 = insertelement <vscale x 4 x i32> poison, i32 4, i32 0
+  %splat2 = shufflevector <vscale x 4 x i32> %ins2, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %v1 = shl <vscale x 4 x i32> %x, %splat1
+  %v2 = shl <vscale x 4 x i32> %v1, %splat2
+  ret <vscale x 4 x i32> %v2
+}
+
+; fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
+
+; Missed today for scalable splats: the two arithmetic shifts should combine
+; into a single sra by 6; codegen currently emits both vsra.vi instructions.
+define <vscale x 2 x i32> @combine_vec_ashr_ashr(<vscale x 2 x i32> %x) {
+; CHECK-LABEL: combine_vec_ashr_ashr:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsra.vi v25, v8, 2
+; CHECK-NEXT:    vsra.vi v8, v25, 4
+; CHECK-NEXT:    ret
+  %ins1 = insertelement <vscale x 2 x i32> poison, i32 2, i32 0
+  %splat1 = shufflevector <vscale x 2 x i32> %ins1, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %ins2 = insertelement <vscale x 2 x i32> poison, i32 4, i32 0
+  %splat2 = shufflevector <vscale x 2 x i32> %ins2, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %v1 = ashr <vscale x 2 x i32> %x, %splat1
+  %v2 = ashr <vscale x 2 x i32> %v1, %splat2
+  ret <vscale x 2 x i32> %v2
+}
+
+; fold (srl (srl x, c1), c2) -> (srl x, (add c1, c2))
+
+; Missed today for scalable splats: the two logical shifts should combine
+; into a single srl by 6; codegen currently emits both vsrl.vi instructions.
+; NOTE(review): the original shifted by %splat2 twice, leaving %splat1 dead
+; and testing srl-by-4-twice rather than the intended 2-then-4. Fixed %v1 to
+; use %splat1; first CHECK shift amount updated accordingly — re-run
+; update_llc_test_checks.py to confirm.
+define <vscale x 8 x i16> @combine_vec_lshr_lshr(<vscale x 8 x i16> %x) {
+; CHECK-LABEL: combine_vec_lshr_lshr:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vsrl.vi v26, v8, 2
+; CHECK-NEXT:    vsrl.vi v8, v26, 4
+; CHECK-NEXT:    ret
+  %ins1 = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
+  %splat1 = shufflevector <vscale x 8 x i16> %ins1, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %ins2 = insertelement <vscale x 8 x i16> poison, i16 4, i32 0
+  %splat2 = shufflevector <vscale x 8 x i16> %ins2, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %v1 = lshr <vscale x 8 x i16> %x, %splat1
+  %v2 = lshr <vscale x 8 x i16> %v1, %splat2
+  ret <vscale x 8 x i16> %v2
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll b/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll
new file mode 100644
index 000000000000..da7334b0bae9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll
@@ -0,0 +1,155 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-v < %s | FileCheck %s --check-prefix=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+experimental-v < %s | FileCheck %s --check-prefix=RV64
+
+; sext(urem X, 6 != 0): currently lowered via the generic magic-number
+; expansion (vmulhu + vsrl + vnmsub) rather than the cheaper urem-by-constant
+; seteq/setne fold the BUILD_VECTOR path gets — a missed optimization this
+; test documents.
+define <vscale x 1 x i16> @test_urem_vec_even_divisor_eq0(<vscale x 1 x i16> %x) nounwind {
+; RV32-LABEL: test_urem_vec_even_divisor_eq0:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 1048571
+; RV32-NEXT:    addi a0, a0, -1365
+; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
+; RV32-NEXT:    vmulhu.vx v25, v8, a0
+; RV32-NEXT:    vsrl.vi v25, v25, 2
+; RV32-NEXT:    addi a0, zero, 6
+; RV32-NEXT:    vnmsub.vx v25, a0, v8
+; RV32-NEXT:    vmv.v.i v26, 0
+; RV32-NEXT:    vmsne.vi v0, v25, 0
+; RV32-NEXT:    vmerge.vim v8, v26, -1, v0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_vec_even_divisor_eq0:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 1048571
+; RV64-NEXT:    addiw a0, a0, -1365
+; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
+; RV64-NEXT:    vmulhu.vx v25, v8, a0
+; RV64-NEXT:    vsrl.vi v25, v25, 2
+; RV64-NEXT:    addi a0, zero, 6
+; RV64-NEXT:    vnmsub.vx v25, a0, v8
+; RV64-NEXT:    vmv.v.i v26, 0
+; RV64-NEXT:    vmsne.vi v0, v25, 0
+; RV64-NEXT:    vmerge.vim v8, v26, -1, v0
+; RV64-NEXT:    ret
+  %ins1 = insertelement <vscale x 1 x i16> poison, i16 6, i32 0
+  %splat1 = shufflevector <vscale x 1 x i16> %ins1, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %urem = urem <vscale x 1 x i16> %x, %splat1
+  %ins2 = insertelement <vscale x 1 x i16> poison, i16 0, i32 0
+  %splat2 = shufflevector <vscale x 1 x i16> %ins2, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %cmp = icmp ne <vscale x 1 x i16> %urem, %splat2
+  %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %ext
+}
+
+; Same as above but with an odd divisor (5): still goes through the generic
+; vmulhu expansion instead of the urem-seteq fold.
+define <vscale x 1 x i16> @test_urem_vec_odd_divisor_eq0(<vscale x 1 x i16> %x) nounwind {
+; RV32-LABEL: test_urem_vec_odd_divisor_eq0:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 1048573
+; RV32-NEXT:    addi a0, a0, -819
+; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
+; RV32-NEXT:    vmulhu.vx v25, v8, a0
+; RV32-NEXT:    vsrl.vi v25, v25, 2
+; RV32-NEXT:    addi a0, zero, 5
+; RV32-NEXT:    vnmsub.vx v25, a0, v8
+; RV32-NEXT:    vmv.v.i v26, 0
+; RV32-NEXT:    vmsne.vi v0, v25, 0
+; RV32-NEXT:    vmerge.vim v8, v26, -1, v0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_vec_odd_divisor_eq0:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 1048573
+; RV64-NEXT:    addiw a0, a0, -819
+; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
+; RV64-NEXT:    vmulhu.vx v25, v8, a0
+; RV64-NEXT:    vsrl.vi v25, v25, 2
+; RV64-NEXT:    addi a0, zero, 5
+; RV64-NEXT:    vnmsub.vx v25, a0, v8
+; RV64-NEXT:    vmv.v.i v26, 0
+; RV64-NEXT:    vmsne.vi v0, v25, 0
+; RV64-NEXT:    vmerge.vim v8, v26, -1, v0
+; RV64-NEXT:    ret
+  %ins1 = insertelement <vscale x 1 x i16> poison, i16 5, i32 0
+  %splat1 = shufflevector <vscale x 1 x i16> %ins1, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %urem = urem <vscale x 1 x i16> %x, %splat1
+  %ins2 = insertelement <vscale x 1 x i16> poison, i16 0, i32 0
+  %splat2 = shufflevector <vscale x 1 x i16> %ins2, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %cmp = icmp ne <vscale x 1 x i16> %urem, %splat2
+  %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %ext
+}
+
+; Variant comparing the remainder against 1 rather than 0 (even divisor, 6);
+; still lowered via the generic vmulhu expansion.
+define <vscale x 1 x i16> @test_urem_vec_even_divisor_eq1(<vscale x 1 x i16> %x) nounwind {
+; RV32-LABEL: test_urem_vec_even_divisor_eq1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 1048571
+; RV32-NEXT:    addi a0, a0, -1365
+; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
+; RV32-NEXT:    vmulhu.vx v25, v8, a0
+; RV32-NEXT:    vsrl.vi v25, v25, 2
+; RV32-NEXT:    addi a0, zero, 6
+; RV32-NEXT:    vnmsub.vx v25, a0, v8
+; RV32-NEXT:    vmsne.vi v0, v25, 1
+; RV32-NEXT:    vmv.v.i v25, 0
+; RV32-NEXT:    vmerge.vim v8, v25, -1, v0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_vec_even_divisor_eq1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 1048571
+; RV64-NEXT:    addiw a0, a0, -1365
+; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
+; RV64-NEXT:    vmulhu.vx v25, v8, a0
+; RV64-NEXT:    vsrl.vi v25, v25, 2
+; RV64-NEXT:    addi a0, zero, 6
+; RV64-NEXT:    vnmsub.vx v25, a0, v8
+; RV64-NEXT:    vmsne.vi v0, v25, 1
+; RV64-NEXT:    vmv.v.i v25, 0
+; RV64-NEXT:    vmerge.vim v8, v25, -1, v0
+; RV64-NEXT:    ret
+  %ins1 = insertelement <vscale x 1 x i16> poison, i16 6, i32 0
+  %splat1 = shufflevector <vscale x 1 x i16> %ins1, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %urem = urem <vscale x 1 x i16> %x, %splat1
+  %ins2 = insertelement <vscale x 1 x i16> poison, i16 1, i32 0
+  %splat2 = shufflevector <vscale x 1 x i16> %ins2, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %cmp = icmp ne <vscale x 1 x i16> %urem, %splat2
+  %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %ext
+}
+
+; Variant comparing the remainder against 1 rather than 0 (odd divisor, 5);
+; still lowered via the generic vmulhu expansion.
+define <vscale x 1 x i16> @test_urem_vec_odd_divisor_eq1(<vscale x 1 x i16> %x) nounwind {
+; RV32-LABEL: test_urem_vec_odd_divisor_eq1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 1048573
+; RV32-NEXT:    addi a0, a0, -819
+; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
+; RV32-NEXT:    vmulhu.vx v25, v8, a0
+; RV32-NEXT:    vsrl.vi v25, v25, 2
+; RV32-NEXT:    addi a0, zero, 5
+; RV32-NEXT:    vnmsub.vx v25, a0, v8
+; RV32-NEXT:    vmsne.vi v0, v25, 1
+; RV32-NEXT:    vmv.v.i v25, 0
+; RV32-NEXT:    vmerge.vim v8, v25, -1, v0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_vec_odd_divisor_eq1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 1048573
+; RV64-NEXT:    addiw a0, a0, -819
+; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
+; RV64-NEXT:    vmulhu.vx v25, v8, a0
+; RV64-NEXT:    vsrl.vi v25, v25, 2
+; RV64-NEXT:    addi a0, zero, 5
+; RV64-NEXT:    vnmsub.vx v25, a0, v8
+; RV64-NEXT:    vmsne.vi v0, v25, 1
+; RV64-NEXT:    vmv.v.i v25, 0
+; RV64-NEXT:    vmerge.vim v8, v25, -1, v0
+; RV64-NEXT:    ret
+  %ins1 = insertelement <vscale x 1 x i16> poison, i16 5, i32 0
+  %splat1 = shufflevector <vscale x 1 x i16> %ins1, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %urem = urem <vscale x 1 x i16> %x, %splat1
+  %ins2 = insertelement <vscale x 1 x i16> poison, i16 1, i32 0
+  %splat2 = shufflevector <vscale x 1 x i16> %ins2, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %cmp = icmp ne <vscale x 1 x i16> %urem, %splat2
+  %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %ext
+}


        


More information about the llvm-commits mailing list