[llvm] c02da38 - [RISCV] Add tests for deinterleave(2-8) shuffles
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Thu Nov 7 14:28:57 PST 2024
Author: Philip Reames
Date: 2024-11-07T14:28:42-08:00
New Revision: c02da382471fd0b338af76ce220e9567e3cb854a
URL: https://github.com/llvm/llvm-project/commit/c02da382471fd0b338af76ce220e9567e3cb854a
DIFF: https://github.com/llvm/llvm-project/commit/c02da382471fd0b338af76ce220e9567e3cb854a.diff
LOG: [RISCV] Add tests for deinterleave(2-8) shuffles
Added:
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll
new file mode 100644
index 00000000000000..a8f75f8d1c24d1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll
@@ -0,0 +1,308 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v \
+; RUN: -lower-interleaved-accesses=false -verify-machineinstrs \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32V
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v \
+; RUN: -lower-interleaved-accesses=false -verify-machineinstrs \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64V
+
+; Stride-3 deinterleave, phase 0: mask selects indices 0,3,6,9,12,15 from a
+; <16 x i8> load; the two trailing result lanes are undef. Lowered as a
+; two-source vrgather (low half unmasked, slid-down high half under mask v0).
+define void @deinterleave3_0_i8(ptr %in, ptr %out) {
+; CHECK-LABEL: deinterleave3_0_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: li a0, 3
+; CHECK-NEXT: vmul.vx v9, v9, a0
+; CHECK-NEXT: vrgather.vv v10, v8, v9
+; CHECK-NEXT: vadd.vi v9, v9, -8
+; CHECK-NEXT: li a0, 56
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 8
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vse8.v v10, (a1)
+; CHECK-NEXT: ret
+entry:
+ %0 = load <16 x i8>, ptr %in, align 1
+ %shuffle.i5 = shufflevector <16 x i8> %0, <16 x i8> poison, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 undef, i32 undef>
+ store <8 x i8> %shuffle.i5, ptr %out, align 1
+ ret void
+}
+
+; Stride-3 deinterleave, phase 1: mask selects indices 1,4,7,10,13; the last
+; three result lanes are undef. Current lowering uses vmadd to build the
+; index vector and a byte-swap (vsrl/vsll/vor) trick for the high-half lanes.
+define void @deinterleave3_8_i8(ptr %in, ptr %out) {
+; CHECK-LABEL: deinterleave3_8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v9, 1
+; CHECK-NEXT: vid.v v10
+; CHECK-NEXT: li a0, 3
+; CHECK-NEXT: vmadd.vx v10, a0, v9
+; CHECK-NEXT: vrgather.vv v9, v8, v10
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 8
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vsrl.vi v10, v8, 8
+; CHECK-NEXT: vsll.vi v8, v8, 8
+; CHECK-NEXT: li a0, 24
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vor.vv v8, v8, v10
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: vse8.v v8, (a1)
+; CHECK-NEXT: ret
+entry:
+ %0 = load <16 x i8>, ptr %in, align 1
+ %shuffle.i5 = shufflevector <16 x i8> %0, <16 x i8> poison, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 undef, i32 undef, i32 undef>
+ store <8 x i8> %shuffle.i5, ptr %out, align 1
+ ret void
+}
+
+; Stride-4 deinterleave, phase 0: mask selects indices 0,4,8,12 (upper four
+; result lanes undef). Lowering combines a widening-add interleave trick
+; (vwaddu/vwmaccu) for the low half with a masked vrgather for the high half.
+define void @deinterleave4_0_i8(ptr %in, ptr %out) {
+; CHECK-LABEL: deinterleave4_0_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 4
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: vwmaccu.vx v10, a0, v9
+; CHECK-NEXT: vmv.v.i v0, 12
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: vsll.vi v9, v9, 2
+; CHECK-NEXT: vadd.vi v9, v9, -8
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 8
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vse8.v v10, (a1)
+; CHECK-NEXT: ret
+entry:
+ %0 = load <16 x i8>, ptr %in, align 1
+ %shuffle.i5 = shufflevector <16 x i8> %0, <16 x i8> poison, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 undef, i32 undef, i32 undef, i32 undef>
+ store <8 x i8> %shuffle.i5, ptr %out, align 1
+ ret void
+}
+
+; Stride-4 deinterleave, phase 1: mask is 1,5,9,14 with upper lanes undef.
+; NOTE(review): a strict stride-4 walk from index 1 would give 1,5,9,13; the
+; final index here is 14 — confirm whether this near-miss mask is intentional
+; (e.g. to exercise the non-exact-deinterleave lowering path).
+define void @deinterleave4_8_i8(ptr %in, ptr %out) {
+; CHECK-LABEL: deinterleave4_8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v9, -9
+; CHECK-NEXT: vid.v v10
+; CHECK-NEXT: li a0, 5
+; CHECK-NEXT: vmacc.vx v9, a0, v10
+; CHECK-NEXT: vsll.vi v10, v10, 2
+; CHECK-NEXT: vadd.vi v10, v10, 1
+; CHECK-NEXT: vrgather.vv v11, v8, v10
+; CHECK-NEXT: vmv.v.i v0, 12
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 8
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vv v11, v8, v9, v0.t
+; CHECK-NEXT: vse8.v v11, (a1)
+; CHECK-NEXT: ret
+entry:
+ %0 = load <16 x i8>, ptr %in, align 1
+ %shuffle.i5 = shufflevector <16 x i8> %0, <16 x i8> poison, <8 x i32> <i32 1, i32 5, i32 9, i32 14, i32 undef, i32 undef, i32 undef, i32 undef>
+ store <8 x i8> %shuffle.i5, ptr %out, align 1
+ ret void
+}
+
+; Stride-5 deinterleave, phase 0: mask selects indices 0,5,10,15 (upper four
+; lanes undef). Index vector is vid*5; high-half lanes gathered under mask.
+define void @deinterleave5_0_i8(ptr %in, ptr %out) {
+; CHECK-LABEL: deinterleave5_0_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: li a0, 5
+; CHECK-NEXT: vmul.vx v9, v9, a0
+; CHECK-NEXT: vrgather.vv v10, v8, v9
+; CHECK-NEXT: vadd.vi v9, v9, -8
+; CHECK-NEXT: vmv.v.i v0, 12
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 8
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vse8.v v10, (a1)
+; CHECK-NEXT: ret
+entry:
+ %0 = load <16 x i8>, ptr %in, align 1
+ %shuffle.i5 = shufflevector <16 x i8> %0, <16 x i8> poison, <8 x i32> <i32 0, i32 5, i32 10, i32 15, i32 undef, i32 undef, i32 undef, i32 undef>
+ store <8 x i8> %shuffle.i5, ptr %out, align 1
+ ret void
+}
+
+; Stride-5 deinterleave, phase 1: mask selects indices 1,6,11 (five trailing
+; lanes undef). Only one source lane crosses into the high half, so the
+; masked gather degenerates to a single-index vrgather.vi.
+define void @deinterleave5_8_i8(ptr %in, ptr %out) {
+; CHECK-LABEL: deinterleave5_8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v9, 1
+; CHECK-NEXT: vid.v v10
+; CHECK-NEXT: li a0, 5
+; CHECK-NEXT: vmadd.vx v10, a0, v9
+; CHECK-NEXT: vrgather.vv v9, v8, v10
+; CHECK-NEXT: vmv.v.i v0, 4
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 8
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vi v9, v8, 3, v0.t
+; CHECK-NEXT: vse8.v v9, (a1)
+; CHECK-NEXT: ret
+entry:
+ %0 = load <16 x i8>, ptr %in, align 1
+ %shuffle.i5 = shufflevector <16 x i8> %0, <16 x i8> poison, <8 x i32> <i32 1, i32 6, i32 11, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ store <8 x i8> %shuffle.i5, ptr %out, align 1
+ ret void
+}
+
+; Stride-6 deinterleave, phase 0: mask selects indices 0,6,12 (five trailing
+; lanes undef). Index 12 lands in the slid-down high half (12-8=4), handled
+; by a masked vrgather.vi with index 4.
+define void @deinterleave6_0_i8(ptr %in, ptr %out) {
+; CHECK-LABEL: deinterleave6_0_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: li a0, 6
+; CHECK-NEXT: vmul.vx v9, v9, a0
+; CHECK-NEXT: vrgather.vv v10, v8, v9
+; CHECK-NEXT: vmv.v.i v0, 4
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 8
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vi v10, v8, 4, v0.t
+; CHECK-NEXT: vse8.v v10, (a1)
+; CHECK-NEXT: ret
+entry:
+ %0 = load <16 x i8>, ptr %in, align 1
+ %shuffle.i5 = shufflevector <16 x i8> %0, <16 x i8> poison, <8 x i32> <i32 0, i32 6, i32 12, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ store <8 x i8> %shuffle.i5, ptr %out, align 1
+ ret void
+}
+
+; Stride-6 deinterleave, phase 1: mask selects indices 1,7,13 (five trailing
+; lanes undef). Index 13 maps to lane 5 of the slid-down high half.
+define void @deinterleave6_8_i8(ptr %in, ptr %out) {
+; CHECK-LABEL: deinterleave6_8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v9, 1
+; CHECK-NEXT: vid.v v10
+; CHECK-NEXT: li a0, 6
+; CHECK-NEXT: vmadd.vx v10, a0, v9
+; CHECK-NEXT: vrgather.vv v9, v8, v10
+; CHECK-NEXT: vmv.v.i v0, 4
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 8
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vi v9, v8, 5, v0.t
+; CHECK-NEXT: vse8.v v9, (a1)
+; CHECK-NEXT: ret
+entry:
+ %0 = load <16 x i8>, ptr %in, align 1
+ %shuffle.i5 = shufflevector <16 x i8> %0, <16 x i8> poison, <8 x i32> <i32 1, i32 7, i32 13, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ store <8 x i8> %shuffle.i5, ptr %out, align 1
+ ret void
+}
+
+; Stride-7 deinterleave, phase 0: mask selects indices 0,7,14 (five trailing
+; lanes undef). Index 14 maps to lane 6 of the slid-down high half.
+define void @deinterleave7_0_i8(ptr %in, ptr %out) {
+; CHECK-LABEL: deinterleave7_0_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: li a0, 7
+; CHECK-NEXT: vmul.vx v9, v9, a0
+; CHECK-NEXT: vrgather.vv v10, v8, v9
+; CHECK-NEXT: vmv.v.i v0, 4
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 8
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vi v10, v8, 6, v0.t
+; CHECK-NEXT: vse8.v v10, (a1)
+; CHECK-NEXT: ret
+entry:
+ %0 = load <16 x i8>, ptr %in, align 1
+ %shuffle.i5 = shufflevector <16 x i8> %0, <16 x i8> poison, <8 x i32> <i32 0, i32 7, i32 14, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ store <8 x i8> %shuffle.i5, ptr %out, align 1
+ ret void
+}
+
+; Stride-7 deinterleave, phase 1: mask is 1,8,14 with five trailing lanes
+; undef.
+; NOTE(review): a strict stride-7 walk from index 1 would give 1,8,15; the
+; final index here is 14 — confirm whether this near-miss mask is intentional
+; (matches the irregularity in @deinterleave4_8_i8 above).
+define void @deinterleave7_8_i8(ptr %in, ptr %out) {
+; CHECK-LABEL: deinterleave7_8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v9, -6
+; CHECK-NEXT: vid.v v10
+; CHECK-NEXT: li a0, 6
+; CHECK-NEXT: vmadd.vx v10, a0, v9
+; CHECK-NEXT: vmv.v.i v0, 6
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 8
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vi v11, v8, 1
+; CHECK-NEXT: vrgather.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vse8.v v11, (a1)
+; CHECK-NEXT: ret
+entry:
+ %0 = load <16 x i8>, ptr %in, align 1
+ %shuffle.i5 = shufflevector <16 x i8> %0, <16 x i8> poison, <8 x i32> <i32 1, i32 8, i32 14, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ store <8 x i8> %shuffle.i5, ptr %out, align 1
+ ret void
+}
+
+; Stride-8 deinterleave, phase 0: mask selects only indices 0 and 8 (six
+; trailing lanes undef), so the lowering is a simple slidedown + slideup
+; with no gather needed.
+define void @deinterleave8_0_i8(ptr %in, ptr %out) {
+; CHECK-LABEL: deinterleave8_0_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 8
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 1
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vse8.v v8, (a1)
+; CHECK-NEXT: ret
+entry:
+ %0 = load <16 x i8>, ptr %in, align 1
+ %shuffle.i5 = shufflevector <16 x i8> %0, <16 x i8> poison, <8 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ store <8 x i8> %shuffle.i5, ptr %out, align 1
+ ret void
+}
+
+; Stride-8 deinterleave, phase 1: mask selects only indices 1 and 9 (six
+; trailing lanes undef); lowered as slidedown plus one masked vrgather.vi.
+define void @deinterleave8_8_i8(ptr %in, ptr %out) {
+; CHECK-LABEL: deinterleave8_8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmv.v.i v0, -3
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 8
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vse8.v v9, (a1)
+; CHECK-NEXT: ret
+entry:
+ %0 = load <16 x i8>, ptr %in, align 1
+ %shuffle.i5 = shufflevector <16 x i8> %0, <16 x i8> poison, <8 x i32> <i32 1, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ store <8 x i8> %shuffle.i5, ptr %out, align 1
+ ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32V: {{.*}}
+; RV64V: {{.*}}
More information about the llvm-commits
mailing list