[llvm] b07d08b - [RISCV] Add additional vslide1up test coverage

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Tue May 30 10:54:09 PDT 2023


Author: Philip Reames
Date: 2023-05-30T10:53:58-07:00
New Revision: b07d08bb8590b2689f6dc5fbea1ab32b703fcff6

URL: https://github.com/llvm/llvm-project/commit/b07d08bb8590b2689f6dc5fbea1ab32b703fcff6
DIFF: https://github.com/llvm/llvm-project/commit/b07d08bb8590b2689f6dc5fbea1ab32b703fcff6.diff

LOG: [RISCV] Add additional vslide1up test coverage

Add another form of the same pattern (the as_rotate tests), and add coverage for a couple of corner cases I initially got wrong in an upcoming rewrite.
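
For context (editorial note, not part of the commit message): the existing vslide1up tests in this file match shuffles where the scalar is inserted into lane 0 of a second vector and the mask shifts the source up by one, while the new as_rotate tests insert the scalar into the last lane of the source and express the same result as a rotate. A rough LLVM IR sketch of the two forms is below; function names are illustrative, not from the patch.

    define <4 x i8> @slide1up_form(<4 x i8> %v, i8 %b) {
      ; scalar lands in lane 0, remaining lanes are %v shifted up by one
      %vb = insertelement <4 x i8> poison, i8 %b, i64 0
      %res = shufflevector <4 x i8> %v, <4 x i8> %vb, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
      ret <4 x i8> %res
    }

    define <4 x i8> @rotate_form(<4 x i8> %v, i8 %b) {
      ; same result: insert into the last lane, then rotate by one
      %v1 = insertelement <4 x i8> %v, i8 %b, i64 3
      %res = shufflevector <4 x i8> %v1, <4 x i8> poison, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
      ret <4 x i8> %res
    }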

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-vslide1up.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-vslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-vslide1up.ll
index 21fb38643bf2..19154c6a7a70 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-vslide1up.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-vslide1up.ll
@@ -304,6 +304,35 @@ define <4 x i8> @vslide1up_4xi8_inverted(<4 x i8> %v, i8 %b) {
   ret <4 x i8> %v2
 }
 
+define <2 x double> @vslide1up_2xf64_as_rotate(<2 x double> %v, double %b) {
+; CHECK-LABEL: vslide1up_2xf64_as_rotate:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vfmv.s.f v9, fa0
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
+; CHECK-NEXT:    vslideup.vi v9, v8, 1
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+  %v1 = insertelement <2 x double> %v, double %b, i64 1
+  %v2 = shufflevector <2 x double> %v1, <2 x double> poison, <2 x i32> <i32 1, i32 0>
+  ret <2 x double> %v2
+}
+
+define <4 x i8> @vslide1up_4xi8_as_rotate(<4 x i8> %v, i8 %b) {
+; CHECK-LABEL: vslide1up_4xi8_as_rotate:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    vslidedown.vi v9, v8, 3
+; CHECK-NEXT:    vslideup.vi v9, v8, 1
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v1 = insertelement <4 x i8> %v, i8 %b, i64 3
+  %v2 = shufflevector <4 x i8> %v1, <4 x i8> poison, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+  ret <4 x i8> %v2
+}
 
 ; The length of the shift is less than the suffix; since we'd have to
 ; materialize the splat, using vslide1up doesn't help us.
@@ -333,3 +362,55 @@ define <4 x i32> @vslide1up_4xi32_neg2(<4 x i32> %v1, <4 x i32> %v2) {
   %res = shufflevector <4 x i32> %v1, <4 x i32> %v2, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
   ret <4 x i32> %res
 }
+
+; Not profitable - can just use a slideup instead
+define <4 x i8> @vslide1up_4xi8_neg_undef_insert(<4 x i8> %v, i8 %b) {
+; CHECK-LABEL: vslide1up_4xi8_neg_undef_insert:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vslideup.vi v9, v8, 1
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v2 = shufflevector <4 x i8> poison, <4 x i8> %v, <4 x i32> <i32 0, i32 4, i32 5, i32 6>
+  ret <4 x i8> %v2
+}
+
+define <4 x i8> @vslide1up_4xi8_neg_incorrect_insert(<4 x i8> %v, i8 %b) {
+; CHECK-LABEL: vslide1up_4xi8_neg_incorrect_insert:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI23_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI23_0)
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vle8.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v9, v8, v10
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v2 = shufflevector <4 x i8> poison, <4 x i8> %v, <4 x i32> <i32 4, i32 4, i32 5, i32 6>
+  ret <4 x i8> %v2
+}
+
+define <4 x i8> @vslide1up_4xi8_neg_incorrect_insert2(<4 x i8> %v, i8 %b) {
+; CHECK-LABEL: vslide1up_4xi8_neg_incorrect_insert2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 3
+; CHECK-NEXT:    vslideup.vi v9, v8, 1
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v2 = shufflevector <4 x i8> poison, <4 x i8> %v, <4 x i32> <i32 7, i32 4, i32 5, i32 6>
+  ret <4 x i8> %v2
+}
+
+define <4 x i8> @vslide1up_4xi8_neg_incorrect_insert3(<4 x i8> %v, i8 %b) {
+; CHECK-LABEL: vslide1up_4xi8_neg_incorrect_insert3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI25_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI25_0)
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vle8.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v9, v8, v10
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v2 = shufflevector <4 x i8> poison, <4 x i8> %v, <4 x i32> <i32 5, i32 4, i32 5, i32 6>
+  ret <4 x i8> %v2
+}

More information about the llvm-commits mailing list