[llvm] 78ab771 - [RISCV][NFC] Add more tests for shuffles with exact vlen

Alexey Bataev via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 20 14:27:54 PST 2024


Author: Alexey Bataev
Date: 2024-12-20T14:27:43-08:00
New Revision: 78ab77199118414aad8632d990bf73aca3d16ff8

URL: https://github.com/llvm/llvm-project/commit/78ab77199118414aad8632d990bf73aca3d16ff8
DIFF: https://github.com/llvm/llvm-project/commit/78ab77199118414aad8632d990bf73aca3d16ff8.diff

LOG: [RISCV][NFC] Add more tests for shuffles with exact vlen
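
These tests pin the vector length via vscale_range(2,2): since RVV defines vscale as VLEN/64, vscale == 2 means VLEN is exactly 128 bits, so each fixed-length vector type maps to a known whole register group (e.g. <4 x i64> is exactly one LMUL=2 group) and shuffles can be lowered as exact in-register operations. A minimal sketch of such a test, mirroring the existing m2_splat_0 pattern (the function name and RUN line below are illustrative, not taken from this commit):

  ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
  ; With vscale fixed at 2 (VLEN = 128), <4 x i64> fills exactly one LMUL=2
  ; register group, so this splat of element 0 can be selected as a single
  ; in-register gather/splat instead of a generic scalable-vector lowering.
  define <4 x i64> @splat_lane0_exact_vlen(<4 x i64> %v) vscale_range(2,2) {
    %res = shufflevector <4 x i64> %v, <4 x i64> poison, <4 x i32> zeroinitializer
    ret <4 x i64> %res
  }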

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
index cadee8acf27d65..f0ee780137300f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 define <4 x i64> @m2_splat_0(<4 x i64> %v1) vscale_range(2,2) {
 ; CHECK-LABEL: m2_splat_0:
@@ -69,16 +69,18 @@ define <4 x i64> @m2_pair_swap_vl4(<4 x i64> %v1) vscale_range(2,2) {
 define <8 x i32> @m2_pair_swap_vl8(<8 x i32> %v1) vscale_range(2,2) {
 ; RV32-LABEL: m2_pair_swap_vl8:
 ; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.i v10, 0
 ; RV32-NEXT:    li a0, 32
-; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT:    li a1, 63
+; RV32-NEXT:    vwsubu.vx v12, v10, a0
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32-NEXT:    vmv.v.x v10, a0
-; RV32-NEXT:    li a0, 63
-; RV32-NEXT:    vand.vx v12, v10, a0
-; RV32-NEXT:    vsll.vv v12, v8, v12
-; RV32-NEXT:    vrsub.vi v10, v10, 0
-; RV32-NEXT:    vand.vx v10, v10, a0
-; RV32-NEXT:    vsrl.vv v8, v8, v10
-; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vand.vx v12, v12, a1
+; RV32-NEXT:    vand.vx v10, v10, a1
+; RV32-NEXT:    vsrl.vv v12, v8, v12
+; RV32-NEXT:    vsll.vv v8, v8, v10
+; RV32-NEXT:    vor.vv v8, v8, v12
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: m2_pair_swap_vl8:
@@ -176,3 +178,137 @@ define <4 x i64> @m2_splat_into_slide_two_source(<4 x i64> %v1, <4 x i64> %v2) v
   %res = shufflevector <4 x i64> %v1, <4 x i64> %v2, <4 x i32> <i32 0, i32 0, i32 5, i32 6>
   ret <4 x i64> %res
 }
+
+define void @shuffle1(ptr %explicit_0, ptr %explicit_1) vscale_range(2,2) {
+; CHECK-LABEL: shuffle1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, a0, 252
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v9, (a0)
+; CHECK-NEXT:    li a0, 175
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vmv.s.x v0, a0
+; CHECK-NEXT:    vadd.vi v8, v8, 1
+; CHECK-NEXT:    vrgather.vv v11, v9, v8
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vmerge.vim v8, v10, 0, v0
+; CHECK-NEXT:    addi a0, a1, 672
+; CHECK-NEXT:    vs2r.v v8, (a0)
+; CHECK-NEXT:    ret
+  %1 = getelementptr i32, ptr %explicit_0, i64 63
+  %2 = load <3 x i32>, ptr %1, align 1
+  %3 = shufflevector <3 x i32> %2, <3 x i32> undef, <2 x i32> <i32 1, i32 2>
+  %4 = shufflevector <2 x i32> %3, <2 x i32> undef, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %5 = shufflevector <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 undef, i32 0, i32 undef, i32 0>, <8 x i32> %4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 5, i32 9, i32 7>
+  %6 = getelementptr inbounds <8 x i32>, ptr %explicit_1, i64 21
+  store <8 x i32> %5, ptr %6, align 32
+  ret void
+}
+
+define <16 x float> @shuffle2(<4 x float> %a) vscale_range(2,2) {
+; CHECK-LABEL: shuffle2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vid.v v9
+; CHECK-NEXT:    li a0, -97
+; CHECK-NEXT:    vadd.vv v9, v9, v9
+; CHECK-NEXT:    vrsub.vi v9, v9, 4
+; CHECK-NEXT:    vmv.s.x v0, a0
+; CHECK-NEXT:    vrgather.vv v13, v8, v9
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vmerge.vim v8, v12, 0, v0
+; CHECK-NEXT:    ret
+  %b = extractelement <4 x float> %a, i32 2
+  %c = insertelement <16 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %b, i32 5
+  %b1 = extractelement <4 x float> %a, i32 0
+  %c1 = insertelement <16 x float> %c, float %b1, i32 6
+  ret <16 x float>%c1
+}
+
+define i64 @extract_any_extend_vector_inreg_v16i64(<16 x i64> %a0, i32 %a1) vscale_range(2,2) {
+; RV32-LABEL: extract_any_extend_vector_inreg_v16i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 16
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT:    vmv.v.i v16, 0
+; RV32-NEXT:    vmv.s.x v0, a1
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vrgather.vi v16, v8, 15, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT:    vslidedown.vx v8, v16, a0
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extract_any_extend_vector_inreg_v16i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -256
+; RV64-NEXT:    .cfi_def_cfa_offset 256
+; RV64-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s2, 232(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    .cfi_offset s2, -24
+; RV64-NEXT:    addi s0, sp, 256
+; RV64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-NEXT:    andi sp, sp, -128
+; RV64-NEXT:    li a1, -17
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT:    vmv.s.x v0, a1
+; RV64-NEXT:    vrgather.vi v16, v8, 15
+; RV64-NEXT:    vmerge.vim v8, v16, 0, v0
+; RV64-NEXT:    mv s2, sp
+; RV64-NEXT:    vs8r.v v8, (s2)
+; RV64-NEXT:    andi a0, a0, 15
+; RV64-NEXT:    li a1, 8
+; RV64-NEXT:    call __muldi3
+; RV64-NEXT:    add a0, s2, a0
+; RV64-NEXT:    ld a0, 0(a0)
+; RV64-NEXT:    addi sp, s0, -256
+; RV64-NEXT:    .cfi_def_cfa sp, 256
+; RV64-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s2, 232(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    .cfi_restore s2
+; RV64-NEXT:    addi sp, sp, 256
+; RV64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-NEXT:    ret
+  %1 = extractelement <16 x i64> %a0, i32 15
+  %2 = insertelement <16 x i64> zeroinitializer, i64 %1, i32 4
+  %3 = extractelement <16 x i64> %2, i32 %a1
+  ret i64 %3
+}
+
+define <4 x double> @shuffles_add(<4 x double> %0, <4 x double> %1) vscale_range(2,2) {
+; CHECK-LABEL: shuffles_add:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vrgather.vi v12, v8, 2
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vid.v v14
+; CHECK-NEXT:    vmv.v.i v0, 12
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vrgather.vi v16, v8, 3
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v14, v14
+; CHECK-NEXT:    vadd.vi v9, v8, -4
+; CHECK-NEXT:    vadd.vi v8, v8, -3
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v12, v10, v9, v0.t
+; CHECK-NEXT:    vrgatherei16.vv v16, v10, v8, v0.t
+; CHECK-NEXT:    vfadd.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %3 = shufflevector <4 x double> %0, <4 x double> %1, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
+  %4 = shufflevector <4 x double> %0, <4 x double> %1, <4 x i32> <i32 undef, i32 3, i32 5, i32 7>
+  %5 = fadd <4 x double> %3, %4
+  ret <4 x double> %5
+}
+