[llvm] ed8a6d6 - [RISCV] Add VLA shuffle coverage
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 5 18:50:57 PST 2025
Author: Philip Reames
Date: 2025-02-05T18:47:45-08:00
New Revision: ed8a6d6155a901af5395d13fc530277c50d02a71
URL: https://github.com/llvm/llvm-project/commit/ed8a6d6155a901af5395d13fc530277c50d02a71
DIFF: https://github.com/llvm/llvm-project/commit/ed8a6d6155a901af5395d13fc530277c50d02a71.diff
LOG: [RISCV] Add VLA shuffle coverage
Add coverage for a few cases that have come up in discussions of
recent VLA shuffle lowering changes.
Added:
Modified:
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
index 8b26c58d5bee19..ac0e211302ed2e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -1340,3 +1340,51 @@ define void @shuffle_i256_splat(ptr %p) nounwind {
ret void
}
+define <16 x i32> @shuffle_m1_prefix(<16 x i32> %a) {
+; RV32-LABEL: shuffle_m1_prefix:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI84_0)
+; RV32-NEXT: addi a0, a0, %lo(.LCPI84_0)
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vle16.v v16, (a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV32-NEXT: vrgatherei16.vv v13, v9, v16
+; RV32-NEXT: vrgatherei16.vv v12, v8, v16
+; RV32-NEXT: vrgatherei16.vv v14, v10, v16
+; RV32-NEXT: vrgatherei16.vv v15, v11, v16
+; RV32-NEXT: vmv4r.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: shuffle_m1_prefix:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, 131073
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: addi a0, a0, 3
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: addi a0, a0, 2
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmv.v.x v16, a0
+; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV64-NEXT: vrgatherei16.vv v13, v9, v16
+; RV64-NEXT: vrgatherei16.vv v12, v8, v16
+; RV64-NEXT: vrgatherei16.vv v14, v10, v16
+; RV64-NEXT: vrgatherei16.vv v15, v11, v16
+; RV64-NEXT: vmv4r.v v8, v12
+; RV64-NEXT: ret
+ %out = shufflevector <16 x i32> %a, <16 x i32> poison, <16 x i32> <i32 2, i32 3, i32 1, i32 2, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ ret <16 x i32> %out
+}
+
+define <16 x i32> @shuffle_m2_prefix(<16 x i32> %a) {
+; CHECK-LABEL: shuffle_m2_prefix:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI85_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI85_0)
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: vrgatherei16.vv v12, v8, v16
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %out = shufflevector <16 x i32> %a, <16 x i32> poison, <16 x i32> <i32 2, i32 3, i32 5, i32 2, i32 3, i32 5, i32 7, i32 4, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ ret <16 x i32> %out
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
index ddbf976553c21e..c222626a166fea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
@@ -1330,6 +1330,48 @@ define <16 x i32> @reverse_v16i32_exact_vlen_256(<16 x i32> %a) vscale_range(4,
%res = shufflevector <16 x i32> %a, <16 x i32> poison, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret <16 x i32> %res
}
+
+define <8 x i32> @reverse_v8i32_undef_suffix(<8 x i32> %a) {
+; CHECK-LABEL: reverse_v8i32_undef_suffix:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT: vid.v v10
+; CHECK-NEXT: srli a1, a0, 2
+; CHECK-NEXT: srli a0, a0, 1
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vrsub.vx v10, v10, a1
+; CHECK-NEXT: vrgather.vv v13, v8, v10
+; CHECK-NEXT: vrgather.vv v12, v9, v10
+; CHECK-NEXT: addi a0, a0, -8
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v12, a0
+; CHECK-NEXT: ret
+ %res = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i32> %res
+}
+
+define <8 x i32> @reverse_v8i32_undef_prefix(<8 x i32> %a) {
+; CHECK-LABEL: reverse_v8i32_undef_prefix:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT: vid.v v10
+; CHECK-NEXT: srli a1, a0, 2
+; CHECK-NEXT: srli a0, a0, 1
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vrsub.vx v10, v10, a1
+; CHECK-NEXT: vrgather.vv v13, v8, v10
+; CHECK-NEXT: vrgather.vv v12, v9, v10
+; CHECK-NEXT: addi a0, a0, -8
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v12, a0
+; CHECK-NEXT: ret
+ %res = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x i32> %res
+}
+
+
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32: {{.*}}
; RV32-ZVBB: {{.*}}
More information about the llvm-commits
mailing list