[llvm] [RISCV] Add DAG combine to convert (iN reduce.add (zext (vXi1 A to vXiN))) into vcpop.m (PR #127497)
Sergey Kachkov via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 18 01:36:23 PST 2025
https://github.com/skachkov-sc updated https://github.com/llvm/llvm-project/pull/127497
>From 5da04155c40ac2b87ac5ea981ad10f9de7e3883b Mon Sep 17 00:00:00 2001
From: Sergey Kachkov <sergey.kachkov at syntacore.com>
Date: Mon, 17 Feb 2025 13:44:31 +0300
Subject: [PATCH 1/4] [RISCV][NFC] Add pre-commit test
---
.../RISCV/rvv/combine-reduce-add-to-vcpop.ll | 392 ++++++++++++++++++
1 file changed, 392 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
new file mode 100644
index 0000000000000..bb53622390186
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
@@ -0,0 +1,392 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zbb | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zbb | FileCheck %s --check-prefixes=CHECK,RV64
+
+define i32 @test_nxv2i1(<vscale x 2 x i1> %x) {
+; CHECK-LABEL: test_nxv2i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.s.x v9, zero
+; CHECK-NEXT: vredsum.vs v8, v8, v9
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = zext <vscale x 2 x i1> %x to <vscale x 2 x i32>
+ %b = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> %a)
+ ret i32 %b
+}
+
+define i32 @test_nxv4i1(<vscale x 4 x i1> %x) {
+; CHECK-LABEL: test_nxv4i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.s.x v10, zero
+; CHECK-NEXT: vredsum.vs v8, v8, v10
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = zext <vscale x 4 x i1> %x to <vscale x 4 x i32>
+ %b = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %a)
+ ret i32 %b
+}
+
+define i32 @test_nxv8i1(<vscale x 8 x i1> %x) {
+; CHECK-LABEL: test_nxv8i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.s.x v12, zero
+; CHECK-NEXT: vredsum.vs v8, v8, v12
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = zext <vscale x 8 x i1> %x to <vscale x 8 x i32>
+ %b = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> %a)
+ ret i32 %b
+}
+
+define i32 @test_nxv16i1(<vscale x 16 x i1> %x) {
+; CHECK-LABEL: test_nxv16i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.s.x v16, zero
+; CHECK-NEXT: vredsum.vs v8, v8, v16
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = zext <vscale x 16 x i1> %x to <vscale x 16 x i32>
+ %b = call i32 @llvm.vector.reduce.add.nxv16i32(<vscale x 16 x i32> %a)
+ ret i32 %b
+}
+
+define i32 @test_nxv32i1(<vscale x 32 x i1> %x) {
+; CHECK-LABEL: test_nxv32i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v0, a0
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
+; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: vredsum.vs v8, v16, v8
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = zext <vscale x 32 x i1> %x to <vscale x 32 x i32>
+ %b = call i32 @llvm.vector.reduce.add.nxv32i32(<vscale x 32 x i32> %a)
+ ret i32 %b
+}
+
+define i32 @test_nxv64i1(<vscale x 64 x i1> %x) {
+; CHECK-LABEL: test_nxv64i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: vmv.v.i v24, 0
+; CHECK-NEXT: srli a1, a0, 1
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v9, v0, a1
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a0
+; CHECK-NEXT: vslidedown.vx v10, v9, a0
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
+; CHECK-NEXT: vmerge.vim v16, v24, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v24, v24, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vadd.vi v24, v24, 1, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t
+; CHECK-NEXT: vadd.vv v8, v24, v16
+; CHECK-NEXT: vmv.s.x v16, zero
+; CHECK-NEXT: vredsum.vs v8, v8, v16
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = zext <vscale x 64 x i1> %x to <vscale x 64 x i32>
+ %b = call i32 @llvm.vector.reduce.add.nxv64i32(<vscale x 64 x i32> %a)
+ ret i32 %b
+}
+
+define i32 @test_nxv128i1(<vscale x 128 x i1> %x) {
+; CHECK-LABEL: test_nxv128i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: vmerge.vim v8, v16, 1, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: srli a1, a0, 1
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v6, v7, a1
+; CHECK-NEXT: vslidedown.vx v5, v0, a1
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v4, v7, a0
+; CHECK-NEXT: vslidedown.vx v3, v0, a0
+; CHECK-NEXT: vmv1r.v v0, v5
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmerge.vim v24, v16, 1, v0
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v5, a0
+; CHECK-NEXT: vslidedown.vx v5, v6, a0
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
+; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v3
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v4
+; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vmv1r.v v0, v5
+; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: vadd.vi v24, v24, 1, v0.t
+; CHECK-NEXT: vmv1r.v v0, v7
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t
+; CHECK-NEXT: vadd.vv v16, v16, v24
+; CHECK-NEXT: vadd.vv v8, v16, v8
+; CHECK-NEXT: vmv.s.x v16, zero
+; CHECK-NEXT: vredsum.vs v8, v8, v16
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add sp, sp, a1
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+entry:
+ %a = zext <vscale x 128 x i1> %x to <vscale x 128 x i32>
+ %b = call i32 @llvm.vector.reduce.add.nxv128i32(<vscale x 128 x i32> %a)
+ ret i32 %b
+}
+
+define i32 @test_nxv256i1(<vscale x 256 x i1> %x) {
+; CHECK-LABEL: test_nxv256i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add a1, a1, a0
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x31, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 49 * vlenb
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v3, v10
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv.v.i v24, 0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: vmerge.vim v16, v24, 1, v0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a2, a2, a0
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: srli a0, a1, 1
+; CHECK-NEXT: srli a1, a1, 2
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v11, v0, a0
+; CHECK-NEXT: vslidedown.vx v12, v8, a0
+; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v2, v9, a1
+; CHECK-NEXT: vslidedown.vx v4, v0, a1
+; CHECK-NEXT: vslidedown.vx v1, v10, a1
+; CHECK-NEXT: vslidedown.vx v7, v8, a1
+; CHECK-NEXT: vslidedown.vx v6, v11, a1
+; CHECK-NEXT: vslidedown.vx v5, v12, a1
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv8r.v v16, v24
+; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, mu
+; CHECK-NEXT: vmerge.vim v24, v24, 1, v0
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a3, a2, 5
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: vmerge.vim v24, v16, 1, v0
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: mv a3, a2
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a3, a3, a2
+; CHECK-NEXT: slli a2, a2, 1
+; CHECK-NEXT: add a2, a2, a3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmerge.vim v8, v16, 1, v0
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: vmerge.vim v8, v16, 1, v0
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv1r.v v0, v5
+; CHECK-NEXT: vmerge.vim v24, v16, 1, v0
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv1r.v v0, v4
+; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v7
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vmv1r.v v0, v2
+; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t
+; CHECK-NEXT: vadd.vv v8, v16, v8
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: mv a3, a2
+; CHECK-NEXT: slli a2, a2, 1
+; CHECK-NEXT: add a2, a2, a3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl1r.v v7, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v7, a0
+; CHECK-NEXT: vslidedown.vx v11, v3, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v11, a1
+; CHECK-NEXT: vslidedown.vx v12, v8, a1
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
+; CHECK-NEXT: vadd.vi v24, v24, 1, v0.t
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t
+; CHECK-NEXT: vadd.vv v16, v16, v24
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vadd.vi v24, v24, 1, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a1, a1, a0
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a1, a1, a0
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv1r.v v0, v3
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 5
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vmv1r.v v0, v7
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a1, a1, a0
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vadd.vi v24, v24, 1, v0.t
+; CHECK-NEXT: vadd.vv v24, v24, v8
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vadd.vv v16, v8, v16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a1, a1, a0
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vadd.vv v8, v24, v8
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vmv.s.x v16, zero
+; CHECK-NEXT: vredsum.vs v8, v8, v16
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a2, a2, a1
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: add sp, sp, a1
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+entry:
+ %a = zext <vscale x 256 x i1> %x to <vscale x 256 x i32>
+ %b = call i32 @llvm.vector.reduce.add.nxv256i32(<vscale x 256 x i32> %a)
+ ret i32 %b
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
>From d0634337393443b8fec60197e9a5e479e15e20ad Mon Sep 17 00:00:00 2001
From: Sergey Kachkov <sergey.kachkov at syntacore.com>
Date: Mon, 17 Feb 2025 13:20:29 +0300
Subject: [PATCH 2/4] [RISCV] Add DAG combine to convert (iN reduce.add (zext
(vXi1 A to vXiN))) into vcpop.m
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 45 ++++++++----
.../RISCV/rvv/combine-reduce-add-to-vcpop.ll | 72 ++++---------------
2 files changed, 43 insertions(+), 74 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c40ab0d09bdf6..20c2785e421ca 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1542,7 +1542,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::MUL, ISD::SDIV, ISD::UDIV,
ISD::SREM, ISD::UREM, ISD::INSERT_VECTOR_ELT,
ISD::ABS, ISD::CTPOP, ISD::VECTOR_SHUFFLE,
- ISD::VSELECT});
+ ISD::VSELECT, ISD::VECREDUCE_ADD});
if (Subtarget.hasVendorXTHeadMemPair())
setTargetDAGCombine({ISD::LOAD, ISD::STORE});
@@ -18100,25 +18100,38 @@ static SDValue combineTruncToVnclip(SDNode *N, SelectionDAG &DAG,
// (iX ctpop (bitcast (vXi1 A)))
// ->
// (zext (vcpop.m (nxvYi1 (insert_subvec (vXi1 A)))))
+// and
+// (iN reduce.add (zext (vXi1 A to vXiN))
+// ->
+// (zext (vcpop.m (nxvYi1 (insert_subvec (vXi1 A)))))
// FIXME: It's complicated to match all the variations of this after type
// legalization so we only handle the pre-type legalization pattern, but that
// requires the fixed vector type to be legal.
-static SDValue combineScalarCTPOPToVCPOP(SDNode *N, SelectionDAG &DAG,
- const RISCVSubtarget &Subtarget) {
+static SDValue combineVCPOP(SDNode *N, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
+ unsigned Opc = N->getOpcode();
+ assert((Opc == ISD::CTPOP || Opc == ISD::VECREDUCE_ADD) &&
+ "Unexpected opcode");
EVT VT = N->getValueType(0);
if (!VT.isScalarInteger())
return SDValue();
SDValue Src = N->getOperand(0);
- // Peek through zero_extend. It doesn't change the count.
- if (Src.getOpcode() == ISD::ZERO_EXTEND)
- Src = Src.getOperand(0);
+ if (Opc == ISD::CTPOP) {
+ // Peek through zero_extend. It doesn't change the count.
+ if (Src.getOpcode() == ISD::ZERO_EXTEND)
+ Src = Src.getOperand(0);
- if (Src.getOpcode() != ISD::BITCAST)
- return SDValue();
+ if (Src.getOpcode() != ISD::BITCAST)
+ return SDValue();
+ Src = Src.getOperand(0);
+ } else if (Opc == ISD::VECREDUCE_ADD) {
+ if (Src.getOpcode() != ISD::ZERO_EXTEND)
+ return SDValue();
+ Src = Src.getOperand(0);
+ }
- Src = Src.getOperand(0);
EVT SrcEVT = Src.getValueType();
if (!SrcEVT.isSimple())
return SDValue();
@@ -18128,11 +18141,14 @@ static SDValue combineScalarCTPOPToVCPOP(SDNode *N, SelectionDAG &DAG,
if (!SrcMVT.isVector() || SrcMVT.getVectorElementType() != MVT::i1)
return SDValue();
- if (!useRVVForFixedLengthVectorVT(SrcMVT, Subtarget))
- return SDValue();
+ MVT ContainerVT = SrcMVT;
+ if (SrcMVT.isFixedLengthVector()) {
+ if (!useRVVForFixedLengthVectorVT(SrcMVT, Subtarget))
+ return SDValue();
- MVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcMVT, Subtarget);
- Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
+ ContainerVT = getContainerForFixedLengthVector(DAG, SrcMVT, Subtarget);
+ Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
+ }
SDLoc DL(N);
auto [Mask, VL] = getDefaultVLOps(SrcMVT, ContainerVT, DL, DAG, Subtarget);
@@ -19214,7 +19230,8 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
return SDValue();
}
case ISD::CTPOP:
- if (SDValue V = combineScalarCTPOPToVCPOP(N, DAG, Subtarget))
+ case ISD::VECREDUCE_ADD:
+ if (SDValue V = combineVCPOP(N, DAG, Subtarget))
return V;
break;
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
index bb53622390186..7156c9faee611 100644
--- a/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
@@ -5,12 +5,8 @@
define i32 @test_nxv2i1(<vscale x 2 x i1> %x) {
; CHECK-LABEL: test_nxv2i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vmv.s.x v9, zero
-; CHECK-NEXT: vredsum.vs v8, v8, v9
-; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = zext <vscale x 2 x i1> %x to <vscale x 2 x i32>
@@ -21,12 +17,8 @@ entry:
define i32 @test_nxv4i1(<vscale x 4 x i1> %x) {
; CHECK-LABEL: test_nxv4i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vmv.s.x v10, zero
-; CHECK-NEXT: vredsum.vs v8, v8, v10
-; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = zext <vscale x 4 x i1> %x to <vscale x 4 x i32>
@@ -37,12 +29,8 @@ entry:
define i32 @test_nxv8i1(<vscale x 8 x i1> %x) {
; CHECK-LABEL: test_nxv8i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vmv.s.x v12, zero
-; CHECK-NEXT: vredsum.vs v8, v8, v12
-; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = zext <vscale x 8 x i1> %x to <vscale x 8 x i32>
@@ -53,12 +41,8 @@ entry:
define i32 @test_nxv16i1(<vscale x 16 x i1> %x) {
; CHECK-LABEL: test_nxv16i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vmv.s.x v16, zero
-; CHECK-NEXT: vredsum.vs v8, v8, v16
-; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = zext <vscale x 16 x i1> %x to <vscale x 16 x i32>
@@ -69,19 +53,8 @@ entry:
define i32 @test_nxv32i1(<vscale x 32 x i1> %x) {
; CHECK-LABEL: test_nxv32i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmv.v.i v16, 0
-; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v8, v0, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
-; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t
-; CHECK-NEXT: vmv.s.x v8, zero
-; CHECK-NEXT: vredsum.vs v8, v16, v8
-; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = zext <vscale x 32 x i1> %x to <vscale x 32 x i32>
@@ -92,29 +65,8 @@ entry:
define i32 @test_nxv64i1(<vscale x 64 x i1> %x) {
; CHECK-LABEL: test_nxv64i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: vmv.v.i v24, 0
-; CHECK-NEXT: srli a1, a0, 1
-; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v9, v0, a1
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a0
-; CHECK-NEXT: vslidedown.vx v10, v9, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
-; CHECK-NEXT: vmerge.vim v16, v24, 1, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vim v24, v24, 1, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vadd.vi v24, v24, 1, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t
-; CHECK-NEXT: vadd.vv v8, v24, v16
-; CHECK-NEXT: vmv.s.x v16, zero
-; CHECK-NEXT: vredsum.vs v8, v8, v16
-; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = zext <vscale x 64 x i1> %x to <vscale x 64 x i32>
>From 90d08bbf03990e87a8420d1c299eb1ff692a4228 Mon Sep 17 00:00:00 2001
From: Sergey Kachkov <sergey.kachkov at syntacore.com>
Date: Tue, 18 Feb 2025 12:28:47 +0300
Subject: [PATCH 3/4] fixup! [RISCV] Add DAG combine to convert (iN reduce.add
(zext (vXi1 A to vXiN))) into vcpop.m
Address review comment
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 20c2785e421ca..2a671075b10ea 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18107,7 +18107,7 @@ static SDValue combineTruncToVnclip(SDNode *N, SelectionDAG &DAG,
// FIXME: It's complicated to match all the variations of this after type
// legalization so we only handle the pre-type legalization pattern, but that
// requires the fixed vector type to be legal.
-static SDValue combineVCPOP(SDNode *N, SelectionDAG &DAG,
+static SDValue combineToVCPOP(SDNode *N, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
unsigned Opc = N->getOpcode();
assert((Opc == ISD::CTPOP || Opc == ISD::VECREDUCE_ADD) &&
@@ -19231,7 +19231,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
}
case ISD::CTPOP:
case ISD::VECREDUCE_ADD:
- if (SDValue V = combineVCPOP(N, DAG, Subtarget))
+ if (SDValue V = combineToVCPOP(N, DAG, Subtarget))
return V;
break;
}
>From c4bcc3f9d3f80c8f76fa42d5ae0a2b5cc7541efc Mon Sep 17 00:00:00 2001
From: Sergey Kachkov <sergey.kachkov at syntacore.com>
Date: Tue, 18 Feb 2025 12:36:05 +0300
Subject: [PATCH 4/4] fixup! fixup! [RISCV] Add DAG combine to convert (iN
reduce.add (zext (vXi1 A to vXiN))) into vcpop.m
Apply clang-format
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 2a671075b10ea..28603fc7361f4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18108,7 +18108,7 @@ static SDValue combineTruncToVnclip(SDNode *N, SelectionDAG &DAG,
// legalization so we only handle the pre-type legalization pattern, but that
// requires the fixed vector type to be legal.
static SDValue combineToVCPOP(SDNode *N, SelectionDAG &DAG,
- const RISCVSubtarget &Subtarget) {
+ const RISCVSubtarget &Subtarget) {
unsigned Opc = N->getOpcode();
assert((Opc == ISD::CTPOP || Opc == ISD::VECREDUCE_ADD) &&
"Unexpected opcode");
More information about the llvm-commits
mailing list