[llvm] 5ad500c - [RISCV] Coverage for a few missed vector idioms

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 25 16:29:11 PDT 2024


Author: Philip Reames
Date: 2024-10-25T16:28:36-07:00
New Revision: 5ad500ca4a1cba1f39757ba2660d4e0c6e3559d3

URL: https://github.com/llvm/llvm-project/commit/5ad500ca4a1cba1f39757ba2660d4e0c6e3559d3
DIFF: https://github.com/llvm/llvm-project/commit/5ad500ca4a1cba1f39757ba2660d4e0c6e3559d3.diff

LOG: [RISCV] Coverage for a few missed vector idioms

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index 7bf47d42de3b95..ea4072f1571204 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -428,6 +428,33 @@ define void @buildvec_dominant0_v8i16(ptr %x) {
   ret void
 }
 
+define void @buildvec_dominant0_v8i16_with_end_element(ptr %x) {
+; CHECK-LABEL: buildvec_dominant0_v8i16_with_end_element:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 8
+; CHECK-NEXT:    li a1, 3
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <8 x i16> <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 3>, ptr %x
+  ret void
+}
+
+define void @buildvec_dominant0_v8i16_with_tail(ptr %x) {
+; CHECK-LABEL: buildvec_dominant0_v8i16_with_tail:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a1, %hi(.LCPI35_0)
+; CHECK-NEXT:    addi a1, a1, %lo(.LCPI35_0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <8 x i16> <i16 8, i16 8, i16 8, i16 8, i16 8, i16 undef, i16 2, i16 3>, ptr %x
+  ret void
+}
+
+
 define void @buildvec_dominant1_v8i16(ptr %x) {
 ; CHECK-LABEL: buildvec_dominant1_v8i16:
 ; CHECK:       # %bb.0:
@@ -494,8 +521,8 @@ define <2 x i8> @buildvec_dominant2_v2i8() {
 define void @buildvec_dominant0_v2i32(ptr %x) {
 ; RV32-LABEL: buildvec_dominant0_v2i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI38_0)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI38_0)
+; RV32-NEXT:    lui a1, %hi(.LCPI40_0)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI40_0)
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a1)
 ; RV32-NEXT:    vse32.v v8, (a0)
@@ -503,8 +530,8 @@ define void @buildvec_dominant0_v2i32(ptr %x) {
 ;
 ; RV64V-LABEL: buildvec_dominant0_v2i32:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    lui a1, %hi(.LCPI38_0)
-; RV64V-NEXT:    ld a1, %lo(.LCPI38_0)(a1)
+; RV64V-NEXT:    lui a1, %hi(.LCPI40_0)
+; RV64V-NEXT:    ld a1, %lo(.LCPI40_0)(a1)
 ; RV64V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64V-NEXT:    vmv.v.i v8, -1
 ; RV64V-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
@@ -514,8 +541,8 @@ define void @buildvec_dominant0_v2i32(ptr %x) {
 ;
 ; RV64ZVE32-LABEL: buildvec_dominant0_v2i32:
 ; RV64ZVE32:       # %bb.0:
-; RV64ZVE32-NEXT:    lui a1, %hi(.LCPI38_0)
-; RV64ZVE32-NEXT:    ld a1, %lo(.LCPI38_0)(a1)
+; RV64ZVE32-NEXT:    lui a1, %hi(.LCPI40_0)
+; RV64ZVE32-NEXT:    ld a1, %lo(.LCPI40_0)(a1)
 ; RV64ZVE32-NEXT:    li a2, -1
 ; RV64ZVE32-NEXT:    sd a1, 0(a0)
 ; RV64ZVE32-NEXT:    sd a2, 8(a0)
@@ -527,8 +554,8 @@ define void @buildvec_dominant0_v2i32(ptr %x) {
 define void @buildvec_dominant1_optsize_v2i32(ptr %x) optsize {
 ; RV32-LABEL: buildvec_dominant1_optsize_v2i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI39_0)
+; RV32-NEXT:    lui a1, %hi(.LCPI41_0)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI41_0)
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a1)
 ; RV32-NEXT:    vse32.v v8, (a0)
@@ -536,8 +563,8 @@ define void @buildvec_dominant1_optsize_v2i32(ptr %x) optsize {
 ;
 ; RV64V-LABEL: buildvec_dominant1_optsize_v2i32:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV64V-NEXT:    addi a1, a1, %lo(.LCPI39_0)
+; RV64V-NEXT:    lui a1, %hi(.LCPI41_0)
+; RV64V-NEXT:    addi a1, a1, %lo(.LCPI41_0)
 ; RV64V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64V-NEXT:    vle64.v v8, (a1)
 ; RV64V-NEXT:    vse64.v v8, (a0)
@@ -545,8 +572,8 @@ define void @buildvec_dominant1_optsize_v2i32(ptr %x) optsize {
 ;
 ; RV64ZVE32-LABEL: buildvec_dominant1_optsize_v2i32:
 ; RV64ZVE32:       # %bb.0:
-; RV64ZVE32-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV64ZVE32-NEXT:    ld a1, %lo(.LCPI39_0)(a1)
+; RV64ZVE32-NEXT:    lui a1, %hi(.LCPI41_0)
+; RV64ZVE32-NEXT:    ld a1, %lo(.LCPI41_0)(a1)
 ; RV64ZVE32-NEXT:    li a2, -1
 ; RV64ZVE32-NEXT:    sd a1, 0(a0)
 ; RV64ZVE32-NEXT:    sd a2, 8(a0)
@@ -604,8 +631,8 @@ define void @buildvec_seq_v8i8_v2i32(ptr %x) {
 define void @buildvec_seq_v16i8_v2i64(ptr %x) {
 ; RV32-LABEL: buildvec_seq_v16i8_v2i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI42_0)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI42_0)
+; RV32-NEXT:    lui a1, %hi(.LCPI44_0)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI44_0)
 ; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; RV32-NEXT:    vle8.v v8, (a1)
 ; RV32-NEXT:    vse8.v v8, (a0)
@@ -613,8 +640,8 @@ define void @buildvec_seq_v16i8_v2i64(ptr %x) {
 ;
 ; RV64V-LABEL: buildvec_seq_v16i8_v2i64:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    lui a1, %hi(.LCPI42_0)
-; RV64V-NEXT:    ld a1, %lo(.LCPI42_0)(a1)
+; RV64V-NEXT:    lui a1, %hi(.LCPI44_0)
+; RV64V-NEXT:    ld a1, %lo(.LCPI44_0)(a1)
 ; RV64V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64V-NEXT:    vmv.v.x v8, a1
 ; RV64V-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
@@ -623,8 +650,8 @@ define void @buildvec_seq_v16i8_v2i64(ptr %x) {
 ;
 ; RV64ZVE32-LABEL: buildvec_seq_v16i8_v2i64:
 ; RV64ZVE32:       # %bb.0:
-; RV64ZVE32-NEXT:    lui a1, %hi(.LCPI42_0)
-; RV64ZVE32-NEXT:    addi a1, a1, %lo(.LCPI42_0)
+; RV64ZVE32-NEXT:    lui a1, %hi(.LCPI44_0)
+; RV64ZVE32-NEXT:    addi a1, a1, %lo(.LCPI44_0)
 ; RV64ZVE32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; RV64ZVE32-NEXT:    vle8.v v8, (a1)
 ; RV64ZVE32-NEXT:    vse8.v v8, (a0)
@@ -656,8 +683,8 @@ define void @buildvec_seq2_v16i8_v2i64(ptr %x) {
 ;
 ; RV64ZVE32-LABEL: buildvec_seq2_v16i8_v2i64:
 ; RV64ZVE32:       # %bb.0:
-; RV64ZVE32-NEXT:    lui a1, %hi(.LCPI43_0)
-; RV64ZVE32-NEXT:    addi a1, a1, %lo(.LCPI43_0)
+; RV64ZVE32-NEXT:    lui a1, %hi(.LCPI45_0)
+; RV64ZVE32-NEXT:    addi a1, a1, %lo(.LCPI45_0)
 ; RV64ZVE32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; RV64ZVE32-NEXT:    vle8.v v8, (a1)
 ; RV64ZVE32-NEXT:    vse8.v v8, (a0)
@@ -3384,3 +3411,33 @@ define <1 x i32> @buildvec_v1i32_pack(i32 %e1) {
   ret <1 x i32> %v1
 }
 
+define <4 x i32> @buildvec_vslide1up(i32 %e1, i32 %e2) {
+; CHECK-LABEL: buildvec_vslide1up:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a1
+; CHECK-NEXT:    ret
+  %v1 = insertelement <4 x i32> poison, i32 %e2, i32 0
+  %v2 = insertelement <4 x i32> %v1, i32 %e1, i32 1
+  %v3 = insertelement <4 x i32> %v2, i32 %e1, i32 2
+  %v4 = insertelement <4 x i32> %v3, i32 %e1, i32 3
+  ret <4 x i32> %v4
+}
+
+define <4 x i1> @buildvec_i1_splat(i1 %e1) {
+; CHECK-LABEL: buildvec_i1_splat:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %v1 = insertelement <4 x i1> poison, i1 %e1, i32 0
+  %v2 = insertelement <4 x i1> %v1, i1 %e1, i32 1
+  %v3 = insertelement <4 x i1> %v2, i1 %e1, i32 2
+  %v4 = insertelement <4 x i1> %v3, i1 %e1, i32 3
+  ret <4 x i1> %v4
+}
+

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
index 47cbb2509441ad..5b9af1a3cfe233 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
@@ -566,3 +566,26 @@ define <128 x i1> @buildvec_mask_optsize_v128i1() optsize {
 ; ZVE32F-NEXT:    ret
   ret <128 x i1> <i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 0, i1 1, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 1, i1 1>
 }
+
+define <4 x i1> @buildvec_mask_splat(i1 %e1) {
+; CHECK-LABEL: buildvec_mask_splat:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+;
+; ZVE32F-LABEL: buildvec_mask_splat:
+; ZVE32F:       # %bb.0:
+; ZVE32F-NEXT:    andi a0, a0, 1
+; ZVE32F-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; ZVE32F-NEXT:    vmv.v.x v8, a0
+; ZVE32F-NEXT:    vmsne.vi v0, v8, 0
+; ZVE32F-NEXT:    ret
+  %v1 = insertelement <4 x i1> poison, i1 %e1, i32 0
+  %v2 = insertelement <4 x i1> %v1, i1 %e1, i32 1
+  %v3 = insertelement <4 x i1> %v2, i1 %e1, i32 2
+  %v4 = insertelement <4 x i1> %v3, i1 %e1, i32 3
+  ret <4 x i1> %v4
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
index 351c0bab9dca89..adfae5ede7bb59 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
@@ -366,3 +366,78 @@ entry:
   ret void
 }
 declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>)
+
+define i64 @op_then_reduce(<4 x i64> %v, <4 x i64> %v2) {
+; CHECK-LABEL: op_then_reduce:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    vmv.s.x v10, zero
+; CHECK-NEXT:    vredsum.vs v8, v8, v10
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+entry:
+  %rdx1 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %v)
+  %rdx2 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %v2)
+  %res = add i64 %rdx1, %rdx2
+  ret i64 %res
+}
+
+
+define i64 @two_reduce_scalar_bypass(<4 x i64> %v, <4 x i64> %v2) {
+; CHECK-LABEL: two_reduce_scalar_bypass:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vmv.s.x v12, zero
+; CHECK-NEXT:    vredxor.vs v8, v8, v12
+; CHECK-NEXT:    vredsum.vs v8, v10, v8
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+entry:
+  %rdx1 = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %v)
+  %rdx2 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %v2)
+  %res = add i64 %rdx1, %rdx2
+  ret i64 %res
+}
+
+define i64 @two_reduce_scalar_bypass_zext(<4 x i64> %v, <4 x i32> %v2) {
+; CHECK-LABEL: two_reduce_scalar_bypass_zext:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.s.x v11, zero
+; CHECK-NEXT:    vredsum.vs v10, v10, v11
+; CHECK-NEXT:    vmv.x.s a0, v10
+; CHECK-NEXT:    slli a0, a0, 32
+; CHECK-NEXT:    srli a0, a0, 32
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vmv.s.x v10, a0
+; CHECK-NEXT:    vredsum.vs v8, v8, v10
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+entry:
+  %rdx1 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %v)
+  %rdx2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v2)
+  %rdx2.zext = zext i32 %rdx2 to i64
+  %res = add i64 %rdx1, %rdx2.zext
+  ret i64 %res
+}
+
+define i64 @two_reduce_scalar_bypass_sext(<4 x i64> %v, <4 x i32> %v2) {
+; CHECK-LABEL: two_reduce_scalar_bypass_sext:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.s.x v11, zero
+; CHECK-NEXT:    vredsum.vs v10, v10, v11
+; CHECK-NEXT:    vmv.x.s a0, v10
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vmv.s.x v10, a0
+; CHECK-NEXT:    vredsum.vs v8, v8, v10
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+entry:
+  %rdx1 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %v)
+  %rdx2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v2)
+  %rdx2.zext = sext i32 %rdx2 to i64
+  %res = add i64 %rdx1, %rdx2.zext
+  ret i64 %res
+}



