[llvm] 67bce07 - [RISCV][NFC] Add test for extractelt of vector types that should be split.

jacquesguan via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 27 23:44:08 PDT 2022


Author: jacquesguan
Date: 2022-09-28T14:43:57+08:00
New Revision: 67bce07964da2320980e09fc631280df3b3a4978

URL: https://github.com/llvm/llvm-project/commit/67bce07964da2320980e09fc631280df3b3a4978
DIFF: https://github.com/llvm/llvm-project/commit/67bce07964da2320980e09fc631280df3b3a4978.diff

LOG: [RISCV][NFC] Add test for extractelt of vector types that should be split.

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D134720
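
For context (not part of the commit itself): the new tests exercise extractelement on scalable vector types wider than a single LMUL=8 register group, so SelectionDAG type legalization has to split them into two halves (v8 and v16 in the checks) before the extract is lowered. A minimal sketch of the pattern being tested, using an illustrative function name, is:

; <vscale x 16 x double> occupies two LMUL=8 register groups, so the type
; legalizer splits it; a variable index forces a spill to the stack, as the
; CHECK lines below show. The function name here is illustrative only.
define double @extract_from_split_type(<vscale x 16 x double> %v, i32 %idx) {
  %r = extractelement <vscale x 16 x double> %v, i32 %idx
  ret double %r
}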

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
index bbc9a8af2ab5..53d2213a2a54 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -579,3 +579,84 @@ define float @extractelt_fdiv_nxv4f32_splat(<vscale x 4 x float> %x) {
   %ext = extractelement <vscale x 4 x float> %bo, i32 0
   ret float %ext
 }
+
+define double @extractelt_nxv16f64_0(<vscale x 16 x double> %v) {
+; CHECK-LABEL: extractelt_nxv16f64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 0, e64, m8, ta, mu
+; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x double> %v, i32 0
+  ret double %r
+}
+
+define double @extractelt_nxv16f64_neg1(<vscale x 16 x double> %v) {
+; CHECK-LABEL: extractelt_nxv16f64_neg1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    andi sp, sp, -64
+; CHECK-NEXT:    addi a0, sp, 64
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a2, a1, 3
+; CHECK-NEXT:    add a2, a0, a2
+; CHECK-NEXT:    vs8r.v v16, (a2)
+; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    fld fa0, -8(a0)
+; CHECK-NEXT:    addi sp, s0, -64
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x double> %v, i32 -1
+  ret double %r
+}
+
+define double @extractelt_nxv16f64_imm(<vscale x 16 x double> %v) {
+; CHECK-LABEL: extractelt_nxv16f64_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 2
+; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x double> %v, i32 2
+  ret double %r
+}
+
+define double @extractelt_nxv16f64_idx(<vscale x 16 x double> %v, i32 signext %idx) {
+; CHECK-LABEL: extractelt_nxv16f64_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a2, a1, 1
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    bltu a0, a2, .LBB54_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a0, a2
+; CHECK-NEXT:  .LBB54_2:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 4
+; CHECK-NEXT:    sub sp, sp, a2
+; CHECK-NEXT:    andi sp, sp, -64
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    addi a2, sp, 64
+; CHECK-NEXT:    add a0, a2, a0
+; CHECK-NEXT:    vs8r.v v8, (a2)
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, a2, a1
+; CHECK-NEXT:    vs8r.v v16, (a1)
+; CHECK-NEXT:    fld fa0, 0(a0)
+; CHECK-NEXT:    addi sp, s0, -64
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x double> %v, i32 %idx
+  ret double %r
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
index d712ced93011..3114f0205fe1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -127,3 +127,45 @@ define i1 @extractelt_nxv64i1(<vscale x 64 x i8>* %x, i64 %idx) nounwind {
   %c = extractelement <vscale x 64 x i1> %b, i64 %idx
   ret i1 %c
 }
+
+define i1 @extractelt_nxv128i1(<vscale x 128 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_nxv128i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a3, a2, 4
+; CHECK-NEXT:    addi a3, a3, -1
+; CHECK-NEXT:    bltu a1, a3, .LBB7_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, a3
+; CHECK-NEXT:  .LBB7_2:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    csrr a3, vlenb
+; CHECK-NEXT:    slli a3, a3, 4
+; CHECK-NEXT:    sub sp, sp, a3
+; CHECK-NEXT:    andi sp, sp, -64
+; CHECK-NEXT:    addi a3, sp, 64
+; CHECK-NEXT:    slli a2, a2, 3
+; CHECK-NEXT:    add a4, a0, a2
+; CHECK-NEXT:    vl8r.v v16, (a4)
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    add a0, a3, a1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
+; CHECK-NEXT:    vmseq.vi v8, v16, 0
+; CHECK-NEXT:    vmseq.vi v0, v24, 0
+; CHECK-NEXT:    vmv.v.i v16, 0
+; CHECK-NEXT:    vmerge.vim v24, v16, 1, v0
+; CHECK-NEXT:    vs8r.v v24, (a3)
+; CHECK-NEXT:    add a1, a3, a2
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v8, v16, 1, v0
+; CHECK-NEXT:    vs8r.v v8, (a1)
+; CHECK-NEXT:    lb a0, 0(a0)
+; CHECK-NEXT:    addi sp, s0, -64
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %a = load <vscale x 128 x i8>, <vscale x 128 x i8>* %x
+  %b = icmp eq <vscale x 128 x i8> %a, zeroinitializer
+  %c = extractelement <vscale x 128 x i1> %b, i64 %idx
+  ret i1 %c
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
index a0474110795a..49b16286d1f8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
@@ -859,3 +859,84 @@ define i32 @extractelt_udiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
   %ext = extractelement <vscale x 4 x i32> %bo, i32 0
   ret i32 %ext
 }
+
+define i32 @extractelt_nxv32i32_0(<vscale x 32 x i32> %v) {
+; CHECK-LABEL: extractelt_nxv32i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 0, e32, m8, ta, mu
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 32 x i32> %v, i32 0
+  ret i32 %r
+}
+
+define i32 @extractelt_nxv32i32_neg1(<vscale x 32 x i32> %v) {
+; CHECK-LABEL: extractelt_nxv32i32_neg1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    andi sp, sp, -64
+; CHECK-NEXT:    addi a0, sp, 64
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a2, a1, 3
+; CHECK-NEXT:    add a2, a0, a2
+; CHECK-NEXT:    vs8r.v v16, (a2)
+; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    lw a0, -4(a0)
+; CHECK-NEXT:    addi sp, s0, -64
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 32 x i32> %v, i32 -1
+  ret i32 %r
+}
+
+define i32 @extractelt_nxv32i32_imm(<vscale x 32 x i32> %v) {
+; CHECK-LABEL: extractelt_nxv32i32_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 2
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 32 x i32> %v, i32 2
+  ret i32 %r
+}
+
+define i32 @extractelt_nxv32i32_idx(<vscale x 32 x i32> %v, i32 %idx) {
+; CHECK-LABEL: extractelt_nxv32i32_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a2, a1, 2
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    bltu a0, a2, .LBB74_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a0, a2
+; CHECK-NEXT:  .LBB74_2:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 4
+; CHECK-NEXT:    sub sp, sp, a2
+; CHECK-NEXT:    andi sp, sp, -64
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    addi a2, sp, 64
+; CHECK-NEXT:    add a0, a2, a0
+; CHECK-NEXT:    vs8r.v v8, (a2)
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, a2, a1
+; CHECK-NEXT:    vs8r.v v16, (a1)
+; CHECK-NEXT:    lw a0, 0(a0)
+; CHECK-NEXT:    addi sp, s0, -64
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 32 x i32> %v, i32 %idx
+  ret i32 %r
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
index 85a99dbbd4ab..39ac6eb58129 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
@@ -790,3 +790,84 @@ define i32 @extractelt_udiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
   %ext = extractelement <vscale x 4 x i32> %bo, i32 0
   ret i32 %ext
 }
+
+define i64 @extractelt_nxv16i64_0(<vscale x 16 x i64> %v) {
+; CHECK-LABEL: extractelt_nxv16i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 0, e64, m8, ta, mu
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x i64> %v, i32 0
+  ret i64 %r
+}
+
+define i64 @extractelt_nxv16i64_neg1(<vscale x 16 x i64> %v) {
+; CHECK-LABEL: extractelt_nxv16i64_neg1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    andi sp, sp, -64
+; CHECK-NEXT:    addi a0, sp, 64
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a2, a1, 3
+; CHECK-NEXT:    add a2, a0, a2
+; CHECK-NEXT:    vs8r.v v16, (a2)
+; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    ld a0, -8(a0)
+; CHECK-NEXT:    addi sp, s0, -64
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x i64> %v, i32 -1
+  ret i64 %r
+}
+
+define i64 @extractelt_nxv16i64_imm(<vscale x 16 x i64> %v) {
+; CHECK-LABEL: extractelt_nxv16i64_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 2
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x i64> %v, i32 2
+  ret i64 %r
+}
+
+define i64 @extractelt_nxv16i64_idx(<vscale x 16 x i64> %v, i32 signext %idx) {
+; CHECK-LABEL: extractelt_nxv16i64_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a2, a1, 1
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    bltu a0, a2, .LBB74_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a0, a2
+; CHECK-NEXT:  .LBB74_2:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 4
+; CHECK-NEXT:    sub sp, sp, a2
+; CHECK-NEXT:    andi sp, sp, -64
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    addi a2, sp, 64
+; CHECK-NEXT:    add a0, a2, a0
+; CHECK-NEXT:    vs8r.v v8, (a2)
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, a2, a1
+; CHECK-NEXT:    vs8r.v v16, (a1)
+; CHECK-NEXT:    ld a0, 0(a0)
+; CHECK-NEXT:    addi sp, s0, -64
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x i64> %v, i32 %idx
+  ret i64 %r
+}


        

