[llvm] faa385a - [RISCV] Add tests for length changing shuffles

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 1 09:51:46 PDT 2024


Author: Luke Lau
Date: 2024-11-01T09:51:39-07:00
New Revision: faa385a9f4d164791e2400f3f53e9c021114f52b

URL: https://github.com/llvm/llvm-project/commit/faa385a9f4d164791e2400f3f53e9c021114f52b
DIFF: https://github.com/llvm/llvm-project/commit/faa385a9f4d164791e2400f3f53e9c021114f52b.diff

LOG: [RISCV] Add tests for length changing shuffles

Tests taken from Luke's 88147 with minimal changes by me (preames).

The main case of interest here is when the mask length is less than the
source length (i.e. the length is decreasing). We often scalarize these
shuffles, which on RISCV can be quite painful.
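
For illustration, a minimal length-decreasing shuffle in LLVM IR looks
like the sketch below (the function and value names are arbitrary; the
mask matches the v4i32_v8i32 test added here):

    ; 8-element source narrowed to a 4-element result by the shuffle mask.
    define <4 x i32> @narrowing_shuffle_example(<8 x i32> %src) {
      %res = shufflevector <8 x i32> %src, <8 x i32> poison, <4 x i32> <i32 5, i32 3, i32 7, i32 2>
      ret <4 x i32> %res
    }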

Added: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-changes-length.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-changes-length.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-changes-length.ll
new file mode 100644
index 00000000000000..29038df652e71e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-changes-length.ll
@@ -0,0 +1,310 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck -check-prefixes=CHECK,RV32 %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck -check-prefixes=CHECK,RV64 %s
+
+
+define <8 x i1> @v8i1_v16i1(<16 x i1>) {
+; RV32-LABEL: v8i1_v16i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a0, v0
+; RV32-NEXT:    slli a1, a0, 19
+; RV32-NEXT:    srli a1, a1, 31
+; RV32-NEXT:    slli a2, a0, 26
+; RV32-NEXT:    srli a2, a2, 31
+; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-NEXT:    vmv.v.x v8, a2
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    slli a1, a0, 24
+; RV32-NEXT:    srli a1, a1, 31
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    slli a1, a0, 29
+; RV32-NEXT:    srli a1, a1, 31
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    slli a1, a0, 18
+; RV32-NEXT:    srli a1, a1, 31
+; RV32-NEXT:    slli a2, a0, 16
+; RV32-NEXT:    srli a2, a2, 31
+; RV32-NEXT:    vmv.v.x v9, a2
+; RV32-NEXT:    vslide1down.vx v9, v9, a1
+; RV32-NEXT:    slli a1, a0, 27
+; RV32-NEXT:    srli a1, a1, 31
+; RV32-NEXT:    vslide1down.vx v9, v9, a1
+; RV32-NEXT:    slli a0, a0, 28
+; RV32-NEXT:    srli a0, a0, 31
+; RV32-NEXT:    vmv.v.i v0, 15
+; RV32-NEXT:    vslide1down.vx v9, v9, a0
+; RV32-NEXT:    vslidedown.vi v9, v8, 4, v0.t
+; RV32-NEXT:    vand.vi v8, v9, 1
+; RV32-NEXT:    vmsne.vi v0, v8, 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: v8i1_v16i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a0, v0
+; RV64-NEXT:    slli a1, a0, 51
+; RV64-NEXT:    srli a1, a1, 63
+; RV64-NEXT:    slli a2, a0, 58
+; RV64-NEXT:    srli a2, a2, 63
+; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-NEXT:    vmv.v.x v8, a2
+; RV64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-NEXT:    slli a1, a0, 56
+; RV64-NEXT:    srli a1, a1, 63
+; RV64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-NEXT:    slli a1, a0, 61
+; RV64-NEXT:    srli a1, a1, 63
+; RV64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-NEXT:    slli a1, a0, 50
+; RV64-NEXT:    srli a1, a1, 63
+; RV64-NEXT:    slli a2, a0, 48
+; RV64-NEXT:    srli a2, a2, 63
+; RV64-NEXT:    vmv.v.x v9, a2
+; RV64-NEXT:    vslide1down.vx v9, v9, a1
+; RV64-NEXT:    slli a1, a0, 59
+; RV64-NEXT:    srli a1, a1, 63
+; RV64-NEXT:    vslide1down.vx v9, v9, a1
+; RV64-NEXT:    slli a0, a0, 60
+; RV64-NEXT:    srli a0, a0, 63
+; RV64-NEXT:    vmv.v.i v0, 15
+; RV64-NEXT:    vslide1down.vx v9, v9, a0
+; RV64-NEXT:    vslidedown.vi v9, v8, 4, v0.t
+; RV64-NEXT:    vand.vi v8, v9, 1
+; RV64-NEXT:    vmsne.vi v0, v8, 0
+; RV64-NEXT:    ret
+  %2 = shufflevector <16 x i1> %0, <16 x i1> poison, <8 x i32> <i32 5, i32 12, i32 7, i32 2, i32 15, i32 13, i32 4, i32 3>
+  ret <8 x i1> %2
+}
+
+define <4 x i32> @v4i32_v8i32(<8 x i32>) {
+; CHECK-LABEL: v4i32_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    vsrl.vi v10, v10, 1
+; CHECK-NEXT:    vrsub.vi v11, v10, 3
+; CHECK-NEXT:    vrgather.vv v10, v8, v11
+; CHECK-NEXT:    vmv.v.i v0, 5
+; CHECK-NEXT:    vsetivli zero, 4, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 4
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v10, v8, 1, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %2 = shufflevector <8 x i32> %0, <8 x i32> poison, <4 x i32> <i32 5, i32 3, i32 7, i32 2>
+  ret <4 x i32> %2
+}
+
+define <4 x i32> @v4i32_v16i32(<16 x i32>) {
+; RV32-LABEL: v4i32_v16i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vmv.v.i v12, 1
+; RV32-NEXT:    vmv.v.i v14, 6
+; RV32-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV32-NEXT:    vslideup.vi v14, v12, 1
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vid.v v12
+; RV32-NEXT:    vadd.vv v12, v12, v12
+; RV32-NEXT:    vadd.vi v15, v12, 1
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT:    vrgatherei16.vv v12, v8, v15
+; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT:    vmv.v.i v0, 10
+; RV32-NEXT:    vsetivli zero, 8, e32, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 8
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vrgatherei16.vv v12, v8, v14, v0.t
+; RV32-NEXT:    vmv1r.v v8, v12
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: v4i32_v16i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT:    vid.v v12
+; RV64-NEXT:    vadd.vv v12, v12, v12
+; RV64-NEXT:    vadd.vi v14, v12, 1
+; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT:    vrgatherei16.vv v12, v8, v14
+; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT:    vmv.v.i v0, 10
+; RV64-NEXT:    vsetivli zero, 8, e32, m4, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 8
+; RV64-NEXT:    li a0, 3
+; RV64-NEXT:    slli a0, a0, 33
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a0, a0, 16
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v10, a0
+; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV64-NEXT:    vrgatherei16.vv v12, v8, v10, v0.t
+; RV64-NEXT:    vmv1r.v v8, v12
+; RV64-NEXT:    ret
+  %2 = shufflevector <16 x i32> %0, <16 x i32> poison, <4 x i32> <i32 1, i32 9, i32 5, i32 14>
+  ret <4 x i32> %2
+}
+
+define <4 x i32> @v4i32_v32i32(<32 x i32>) {
+; RV32-LABEL: v4i32_v32i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -256
+; RV32-NEXT:    .cfi_def_cfa_offset 256
+; RV32-NEXT:    sw ra, 252(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 248(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 256
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    andi sp, sp, -128
+; RV32-NEXT:    li a0, 32
+; RV32-NEXT:    mv a1, sp
+; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT:    vse32.v v8, (a1)
+; RV32-NEXT:    lw a0, 36(sp)
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 1
+; RV32-NEXT:    vmv.x.s a1, v10
+; RV32-NEXT:    vmv.v.x v10, a1
+; RV32-NEXT:    vslide1down.vx v10, v10, a0
+; RV32-NEXT:    lw a0, 120(sp)
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 4
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v10, a1
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    addi sp, s0, -256
+; RV32-NEXT:    lw ra, 252(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 248(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 256
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: v4i32_v32i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -256
+; RV64-NEXT:    .cfi_def_cfa_offset 256
+; RV64-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    addi s0, sp, 256
+; RV64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-NEXT:    andi sp, sp, -128
+; RV64-NEXT:    li a0, 32
+; RV64-NEXT:    mv a1, sp
+; RV64-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; RV64-NEXT:    vse32.v v8, (a1)
+; RV64-NEXT:    lw a0, 36(sp)
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 1
+; RV64-NEXT:    vmv.x.s a1, v10
+; RV64-NEXT:    vmv.v.x v10, a1
+; RV64-NEXT:    vslide1down.vx v10, v10, a0
+; RV64-NEXT:    lw a0, 120(sp)
+; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 4
+; RV64-NEXT:    vmv.x.s a1, v8
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT:    vslide1down.vx v8, v10, a1
+; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    addi sp, s0, -256
+; RV64-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 256
+; RV64-NEXT:    ret
+  %2 = shufflevector <32 x i32> %0, <32 x i32> poison, <4 x i32> <i32 1, i32 9, i32 4, i32 30>
+  ret <4 x i32> %2
+}
+
+define <16 x i1> @v16i1_v8i1(<8 x i1>) {
+; CHECK-LABEL: v16i1_v8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI4_0)
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT:    vrgather.vv v10, v9, v8
+; CHECK-NEXT:    vmsne.vi v0, v10, 0
+; CHECK-NEXT:    ret
+  %2 = shufflevector <8 x i1> %0, <8 x i1> poison, <16 x i32> <i32 2, i32 3, i32 0, i32 5, i32 1, i32 2, i32 0, i32 6, i32 2, i32 3, i32 0, i32 7, i32 1, i32 2, i32 0, i32 4>
+  ret <16 x i1> %2
+}
+
+define <8 x i32> @v8i32_v4i32(<4 x i32>) {
+; CHECK-LABEL: v8i32_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI5_0)
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vle16.v v12, (a0)
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %2 = shufflevector <4 x i32> %0, <4 x i32> poison, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 1, i32 2, i32 0, i32 3>
+  ret <8 x i32> %2
+}
+
+define <16 x i32> @v16i32_v4i32(<4 x i32>) {
+; CHECK-LABEL: v16i32_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 2
+; CHECK-NEXT:    addi a1, a0, 265
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.s.x v0, a1
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v9, 3
+; CHECK-NEXT:    vmerge.vim v10, v9, 2, v0
+; CHECK-NEXT:    lui a1, 4
+; CHECK-NEXT:    addi a1, a1, 548
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.s.x v0, a1
+; CHECK-NEXT:    addi a0, a0, -1856
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmerge.vim v10, v10, 0, v0
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsext.vf2 v16, v9
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+  %2 = shufflevector <4 x i32> %0, <4 x i32> poison, <16 x i32> <i32 2, i32 3, i32 0, i32 2, i32 3, i32 0, i32 1, i32 1, i32 2, i32 0, i32 3, i32 1, i32 1, i32 2, i32 0, i32 3>
+  ret <16 x i32> %2
+}
+
+define <32 x i32> @v32i32_v4i32(<4 x i32>) {
+; CHECK-LABEL: v32i32_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    lui a1, 135432
+; CHECK-NEXT:    addi a1, a1, 1161
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.s.x v0, a1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vmv.v.i v10, 3
+; CHECK-NEXT:    vmerge.vim v10, v10, 2, v0
+; CHECK-NEXT:    lui a0, 270865
+; CHECK-NEXT:    addi a0, a0, 548
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vmv.s.x v0, a0
+; CHECK-NEXT:    lui a0, 100550
+; CHECK-NEXT:    addi a0, a0, 64
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmerge.vim v10, v10, 0, v0
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vsext.vf2 v24, v10
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v16, v8, v24
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+  %2 = shufflevector <4 x i32> %0, <4 x i32> poison, <32 x i32> <i32 2, i32 3, i32 0, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 2, i32 3, i32 0, i32 1, i32 1, i32 2, i32 0, i32 3, i32 1, i32 1, i32 2, i32 0, i32 3, i32 1, i32 2, i32 0, i32 3, i32 1, i32 1, i32 2, i32 0, i32 3>
+  ret <32 x i32> %2
+}

More information about the llvm-commits mailing list