[llvm] a3beae6 - [RISCV] Add test cases for fixed vector extract element with non-constant index. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 25 22:37:17 PST 2021


Author: Craig Topper
Date: 2021-02-25T22:36:38-08:00
New Revision: a3beae6171ff1982cc2e1eafdc0fbfcab992577a

URL: https://github.com/llvm/llvm-project/commit/a3beae6171ff1982cc2e1eafdc0fbfcab992577a
DIFF: https://github.com/llvm/llvm-project/commit/a3beae6171ff1982cc2e1eafdc0fbfcab992577a.diff

LOG: [RISCV] Add test cases for fixed vector extract element with non-constant index. NFC
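
For context, the new tests differ from the existing ones in this file only in
how the element index is supplied: it now arrives as a function argument rather
than an immediate, which is why the checks below use vslidedown.vx with the
index in a GPR. A minimal illustration of the two IR patterns (the %v and %idx
names here are placeholders, not taken from the test file):

    ; already covered earlier in the file: constant index
    %c = extractelement <16 x i8> %v, i32 7
    ; added by this patch: index is a non-constant function argument
    %d = extractelement <16 x i8> %v, i32 %idx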

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
index 5c1144880af4..62b61ff08459 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -349,3 +349,397 @@ define i64 @extractelt_v3i64(<3 x i64>* %x) nounwind {
   %b = extractelement <3 x i64> %a, i32 2
   ret i64 %b
 }
+
+define i8 @extractelt_v16i8_idx(<16 x i8>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v16i8_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
+; RV32-NEXT:    vle8.v v25, (a0)
+; RV32-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
+; RV32-NEXT:    vslidedown.vx v25, v25, a1
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v16i8_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
+; RV64-NEXT:    vle8.v v25, (a0)
+; RV64-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
+; RV64-NEXT:    vslidedown.vx v25, v25, a1
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    ret
+  %a = load <16 x i8>, <16 x i8>* %x
+  %b = extractelement <16 x i8> %a, i32 %idx
+  ret i8 %b
+}
+
+define i16 @extractelt_v8i16_idx(<8 x i16>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v8i16_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
+; RV32-NEXT:    vle16.v v25, (a0)
+; RV32-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
+; RV32-NEXT:    vslidedown.vx v25, v25, a1
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v8i16_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
+; RV64-NEXT:    vle16.v v25, (a0)
+; RV64-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
+; RV64-NEXT:    vslidedown.vx v25, v25, a1
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    ret
+  %a = load <8 x i16>, <8 x i16>* %x
+  %b = extractelement <8 x i16> %a, i32 %idx
+  ret i16 %b
+}
+
+define i32 @extractelt_v4i32_idx(<4 x i32>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v4i32_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; RV32-NEXT:    vle32.v v25, (a0)
+; RV32-NEXT:    vadd.vv v25, v25, v25
+; RV32-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
+; RV32-NEXT:    vslidedown.vx v25, v25, a1
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v4i32_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; RV64-NEXT:    vle32.v v25, (a0)
+; RV64-NEXT:    vadd.vv v25, v25, v25
+; RV64-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
+; RV64-NEXT:    vslidedown.vx v25, v25, a1
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    ret
+  %a = load <4 x i32>, <4 x i32>* %x
+  %b = add <4 x i32> %a, %a
+  %c = extractelement <4 x i32> %b, i32 %idx
+  ret i32 %c
+}
+
+define i64 @extractelt_v2i64_idx(<2 x i64>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v2i64_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
+; RV32-NEXT:    vle64.v v25, (a0)
+; RV32-NEXT:    vadd.vv v25, v25, v25
+; RV32-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
+; RV32-NEXT:    vslidedown.vx v25, v25, a1
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    vsrl.vx v25, v25, a1
+; RV32-NEXT:    vmv.x.s a1, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v2i64_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
+; RV64-NEXT:    vle64.v v25, (a0)
+; RV64-NEXT:    vadd.vv v25, v25, v25
+; RV64-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
+; RV64-NEXT:    vslidedown.vx v25, v25, a1
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = add <2 x i64> %a, %a
+  %c = extractelement <2 x i64> %b, i32 %idx
+  ret i64 %c
+}
+
+define half @extractelt_v8f16_idx(<8 x half>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v8f16_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
+; RV32-NEXT:    vle16.v v25, (a0)
+; RV32-NEXT:    vfadd.vv v25, v25, v25
+; RV32-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
+; RV32-NEXT:    vslidedown.vx v25, v25, a1
+; RV32-NEXT:    vfmv.f.s fa0, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v8f16_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
+; RV64-NEXT:    vle16.v v25, (a0)
+; RV64-NEXT:    vfadd.vv v25, v25, v25
+; RV64-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
+; RV64-NEXT:    vslidedown.vx v25, v25, a1
+; RV64-NEXT:    vfmv.f.s fa0, v25
+; RV64-NEXT:    ret
+  %a = load <8 x half>, <8 x half>* %x
+  %b = fadd <8 x half> %a, %a
+  %c = extractelement <8 x half> %b, i32 %idx
+  ret half %c
+}
+
+define float @extractelt_v4f32_idx(<4 x float>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v4f32_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; RV32-NEXT:    vle32.v v25, (a0)
+; RV32-NEXT:    vfadd.vv v25, v25, v25
+; RV32-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
+; RV32-NEXT:    vslidedown.vx v25, v25, a1
+; RV32-NEXT:    vfmv.f.s fa0, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v4f32_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; RV64-NEXT:    vle32.v v25, (a0)
+; RV64-NEXT:    vfadd.vv v25, v25, v25
+; RV64-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
+; RV64-NEXT:    vslidedown.vx v25, v25, a1
+; RV64-NEXT:    vfmv.f.s fa0, v25
+; RV64-NEXT:    ret
+  %a = load <4 x float>, <4 x float>* %x
+  %b = fadd <4 x float> %a, %a
+  %c = extractelement <4 x float> %b, i32 %idx
+  ret float %c
+}
+
+define double @extractelt_v2f64_idx(<2 x double>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v2f64_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
+; RV32-NEXT:    vle64.v v25, (a0)
+; RV32-NEXT:    vfadd.vv v25, v25, v25
+; RV32-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
+; RV32-NEXT:    vslidedown.vx v25, v25, a1
+; RV32-NEXT:    vfmv.f.s fa0, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v2f64_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
+; RV64-NEXT:    vle64.v v25, (a0)
+; RV64-NEXT:    vfadd.vv v25, v25, v25
+; RV64-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
+; RV64-NEXT:    vslidedown.vx v25, v25, a1
+; RV64-NEXT:    vfmv.f.s fa0, v25
+; RV64-NEXT:    ret
+  %a = load <2 x double>, <2 x double>* %x
+  %b = fadd <2 x double> %a, %a
+  %c = extractelement <2 x double> %b, i32 %idx
+  ret double %c
+}
+
+define i8 @extractelt_v32i8_idx(<32 x i8>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v32i8_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi a2, zero, 32
+; RV32-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; RV32-NEXT:    vle8.v v26, (a0)
+; RV32-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
+; RV32-NEXT:    vslidedown.vx v26, v26, a1
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v32i8_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a2, zero, 32
+; RV64-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; RV64-NEXT:    vle8.v v26, (a0)
+; RV64-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
+; RV64-NEXT:    vslidedown.vx v26, v26, a1
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    ret
+  %a = load <32 x i8>, <32 x i8>* %x
+  %b = extractelement <32 x i8> %a, i32 %idx
+  ret i8 %b
+}
+
+define i16 @extractelt_v16i16_idx(<16 x i16>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v16i16_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
+; RV32-NEXT:    vle16.v v26, (a0)
+; RV32-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
+; RV32-NEXT:    vslidedown.vx v26, v26, a1
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v16i16_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
+; RV64-NEXT:    vle16.v v26, (a0)
+; RV64-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
+; RV64-NEXT:    vslidedown.vx v26, v26, a1
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    ret
+  %a = load <16 x i16>, <16 x i16>* %x
+  %b = extractelement <16 x i16> %a, i32 %idx
+  ret i16 %b
+}
+
+define i32 @extractelt_v8i32_idx(<8 x i32>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v8i32_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
+; RV32-NEXT:    vle32.v v26, (a0)
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
+; RV32-NEXT:    vslidedown.vx v26, v26, a1
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v8i32_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
+; RV64-NEXT:    vle32.v v26, (a0)
+; RV64-NEXT:    vadd.vv v26, v26, v26
+; RV64-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
+; RV64-NEXT:    vslidedown.vx v26, v26, a1
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    ret
+  %a = load <8 x i32>, <8 x i32>* %x
+  %b = add <8 x i32> %a, %a
+  %c = extractelement <8 x i32> %b, i32 %idx
+  ret i32 %c
+}
+
+define i64 @extractelt_v4i64_idx(<4 x i64>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v4i64_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
+; RV32-NEXT:    vle64.v v26, (a0)
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
+; RV32-NEXT:    vslidedown.vx v26, v26, a1
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    vsrl.vx v26, v26, a1
+; RV32-NEXT:    vmv.x.s a1, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v4i64_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
+; RV64-NEXT:    vle64.v v26, (a0)
+; RV64-NEXT:    vadd.vv v26, v26, v26
+; RV64-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
+; RV64-NEXT:    vslidedown.vx v26, v26, a1
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    ret
+  %a = load <4 x i64>, <4 x i64>* %x
+  %b = add <4 x i64> %a, %a
+  %c = extractelement <4 x i64> %b, i32 %idx
+  ret i64 %c
+}
+
+define half @extractelt_v16f16_idx(<16 x half>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v16f16_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
+; RV32-NEXT:    vle16.v v26, (a0)
+; RV32-NEXT:    vfadd.vv v26, v26, v26
+; RV32-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
+; RV32-NEXT:    vslidedown.vx v26, v26, a1
+; RV32-NEXT:    vfmv.f.s fa0, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v16f16_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
+; RV64-NEXT:    vle16.v v26, (a0)
+; RV64-NEXT:    vfadd.vv v26, v26, v26
+; RV64-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
+; RV64-NEXT:    vslidedown.vx v26, v26, a1
+; RV64-NEXT:    vfmv.f.s fa0, v26
+; RV64-NEXT:    ret
+  %a = load <16 x half>, <16 x half>* %x
+  %b = fadd <16 x half> %a, %a
+  %c = extractelement <16 x half> %b, i32 %idx
+  ret half %c
+}
+
+define float @extractelt_v8f32_idx(<8 x float>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v8f32_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
+; RV32-NEXT:    vle32.v v26, (a0)
+; RV32-NEXT:    vfadd.vv v26, v26, v26
+; RV32-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
+; RV32-NEXT:    vslidedown.vx v26, v26, a1
+; RV32-NEXT:    vfmv.f.s fa0, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v8f32_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
+; RV64-NEXT:    vle32.v v26, (a0)
+; RV64-NEXT:    vfadd.vv v26, v26, v26
+; RV64-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
+; RV64-NEXT:    vslidedown.vx v26, v26, a1
+; RV64-NEXT:    vfmv.f.s fa0, v26
+; RV64-NEXT:    ret
+  %a = load <8 x float>, <8 x float>* %x
+  %b = fadd <8 x float> %a, %a
+  %c = extractelement <8 x float> %b, i32 %idx
+  ret float %c
+}
+
+define double @extractelt_v4f64_idx(<4 x double>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v4f64_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
+; RV32-NEXT:    vle64.v v26, (a0)
+; RV32-NEXT:    vfadd.vv v26, v26, v26
+; RV32-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
+; RV32-NEXT:    vslidedown.vx v26, v26, a1
+; RV32-NEXT:    vfmv.f.s fa0, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v4f64_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
+; RV64-NEXT:    vle64.v v26, (a0)
+; RV64-NEXT:    vfadd.vv v26, v26, v26
+; RV64-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
+; RV64-NEXT:    vslidedown.vx v26, v26, a1
+; RV64-NEXT:    vfmv.f.s fa0, v26
+; RV64-NEXT:    ret
+  %a = load <4 x double>, <4 x double>* %x
+  %b = fadd <4 x double> %a, %a
+  %c = extractelement <4 x double> %b, i32 %idx
+  ret double %c
+}
+
+; This uses a non-power of 2 type so that it isn't an MVT to catch an
+; incorrect use of getSimpleValueType().
+; NOTE: Type legalization is bitcasting to vXi32 and doing 2 independent
+; slidedowns and extracts.
+define i64 @extractelt_v3i64_idx(<3 x i64>* %x, i32 signext %idx) nounwind {
+; RV32-LABEL: extractelt_v3i64_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
+; RV32-NEXT:    vle64.v v26, (a0)
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    add a1, a1, a1
+; RV32-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
+; RV32-NEXT:    vslidedown.vx v28, v26, a1
+; RV32-NEXT:    vmv.x.s a0, v28
+; RV32-NEXT:    addi a1, a1, 1
+; RV32-NEXT:    vslidedown.vx v26, v26, a1
+; RV32-NEXT:    vmv.x.s a1, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v3i64_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
+; RV64-NEXT:    vle64.v v26, (a0)
+; RV64-NEXT:    vadd.vv v26, v26, v26
+; RV64-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
+; RV64-NEXT:    vslidedown.vx v26, v26, a1
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    ret
+  %a = load <3 x i64>, <3 x i64>* %x
+  %b = add <3 x i64> %a, %a
+  %c = extractelement <3 x i64> %b, i32 %idx
+  ret i64 %c
+}
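
Regarding the NOTE above extractelt_v3i64_idx: on RV32 the i64 element has to
be reassembled from two 32-bit halves, which is what the doubled index
(add a1, a1, a1), the extra addi a1, a1, 1, and the second vslidedown.vx in the
RV32 output correspond to. A rough IR-level sketch of that legalization follows;
the function name, the <8 x i32> width, and the zext/shl/or reassembly are
illustrative only and not part of the test:

    ; RISC-V is little-endian, so element 2*idx of the i32 view is the low half
    ; and element 2*idx+1 is the high half of the original i64 element.
    define i64 @v3i64_idx_sketch(<8 x i32> %v32, i32 %idx) {
      %lo.idx = shl i32 %idx, 1
      %hi.idx = or i32 %lo.idx, 1
      %lo = extractelement <8 x i32> %v32, i32 %lo.idx
      %hi = extractelement <8 x i32> %v32, i32 %hi.idx
      %lo.64 = zext i32 %lo to i64
      %hi.64 = zext i32 %hi to i64
      %hi.sh = shl i64 %hi.64, 32
      %res = or i64 %hi.sh, %lo.64
      ret i64 %res
    }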


        

