[llvm] ae42563 - [RISCV][NFC] Merge rv32 and rv64 insertelt/extract fp tests.

via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 26 19:57:24 PDT 2022


Author: jacquesguan
Date: 2022-09-27T10:57:13+08:00
New Revision: ae4256355ec907d62e661f1895b10154942ce8a9

URL: https://github.com/llvm/llvm-project/commit/ae4256355ec907d62e661f1895b10154942ce8a9
DIFF: https://github.com/llvm/llvm-project/commit/ae4256355ec907d62e661f1895b10154942ce8a9.diff

LOG: [RISCV][NFC] Merge rv32 and rv64 insertelt/extract fp tests.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D134631

Added: 
    llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll

Modified: 
    

Removed: 
    llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll
deleted file mode 100644
index d00450d763ec0..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll
+++ /dev/null
@@ -1,550 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
-
-define half @extractelt_nxv1f16_0(<vscale x 1 x half> %v) {
-; CHECK-LABEL: extractelt_nxv1f16_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 1 x half> %v, i32 0
-  ret half %r
-}
-
-define half @extractelt_nxv1f16_imm(<vscale x 1 x half> %v) {
-; CHECK-LABEL: extractelt_nxv1f16_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 1 x half> %v, i32 2
-  ret half %r
-}
-
-define half @extractelt_nxv1f16_idx(<vscale x 1 x half> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv1f16_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 1 x half> %v, i32 %idx
-  ret half %r
-}
-
-define half @extractelt_nxv2f16_0(<vscale x 2 x half> %v) {
-; CHECK-LABEL: extractelt_nxv2f16_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 2 x half> %v, i32 0
-  ret half %r
-}
-
-define half @extractelt_nxv2f16_imm(<vscale x 2 x half> %v) {
-; CHECK-LABEL: extractelt_nxv2f16_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 2 x half> %v, i32 2
-  ret half %r
-}
-
-define half @extractelt_nxv2f16_idx(<vscale x 2 x half> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv2f16_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 2 x half> %v, i32 %idx
-  ret half %r
-}
-
-define half @extractelt_nxv4f16_0(<vscale x 4 x half> %v) {
-; CHECK-LABEL: extractelt_nxv4f16_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 4 x half> %v, i32 0
-  ret half %r
-}
-
-define half @extractelt_nxv4f16_imm(<vscale x 4 x half> %v) {
-; CHECK-LABEL: extractelt_nxv4f16_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 4 x half> %v, i32 2
-  ret half %r
-}
-
-define half @extractelt_nxv4f16_idx(<vscale x 4 x half> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv4f16_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 4 x half> %v, i32 %idx
-  ret half %r
-}
-
-define half @extractelt_nxv8f16_0(<vscale x 8 x half> %v) {
-; CHECK-LABEL: extractelt_nxv8f16_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e16, m2, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 8 x half> %v, i32 0
-  ret half %r
-}
-
-define half @extractelt_nxv8f16_imm(<vscale x 8 x half> %v) {
-; CHECK-LABEL: extractelt_nxv8f16_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m2, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 8 x half> %v, i32 2
-  ret half %r
-}
-
-define half @extractelt_nxv8f16_idx(<vscale x 8 x half> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv8f16_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m2, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 8 x half> %v, i32 %idx
-  ret half %r
-}
-
-define half @extractelt_nxv16f16_0(<vscale x 16 x half> %v) {
-; CHECK-LABEL: extractelt_nxv16f16_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e16, m4, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 16 x half> %v, i32 0
-  ret half %r
-}
-
-define half @extractelt_nxv16f16_imm(<vscale x 16 x half> %v) {
-; CHECK-LABEL: extractelt_nxv16f16_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m4, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 16 x half> %v, i32 2
-  ret half %r
-}
-
-define half @extractelt_nxv16f16_idx(<vscale x 16 x half> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv16f16_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m4, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 16 x half> %v, i32 %idx
-  ret half %r
-}
-
-define half @extractelt_nxv32f16_0(<vscale x 32 x half> %v) {
-; CHECK-LABEL: extractelt_nxv32f16_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e16, m8, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 32 x half> %v, i32 0
-  ret half %r
-}
-
-define half @extractelt_nxv32f16_imm(<vscale x 32 x half> %v) {
-; CHECK-LABEL: extractelt_nxv32f16_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m8, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 32 x half> %v, i32 2
-  ret half %r
-}
-
-define half @extractelt_nxv32f16_idx(<vscale x 32 x half> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv32f16_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m8, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 32 x half> %v, i32 %idx
-  ret half %r
-}
-
-define float @extractelt_nxv1f32_0(<vscale x 1 x float> %v) {
-; CHECK-LABEL: extractelt_nxv1f32_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 1 x float> %v, i32 0
-  ret float %r
-}
-
-define float @extractelt_nxv1f32_imm(<vscale x 1 x float> %v) {
-; CHECK-LABEL: extractelt_nxv1f32_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 1 x float> %v, i32 2
-  ret float %r
-}
-
-define float @extractelt_nxv1f32_idx(<vscale x 1 x float> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv1f32_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 1 x float> %v, i32 %idx
-  ret float %r
-}
-
-define float @extractelt_nxv2f32_0(<vscale x 2 x float> %v) {
-; CHECK-LABEL: extractelt_nxv2f32_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 2 x float> %v, i32 0
-  ret float %r
-}
-
-define float @extractelt_nxv2f32_imm(<vscale x 2 x float> %v) {
-; CHECK-LABEL: extractelt_nxv2f32_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 2 x float> %v, i32 2
-  ret float %r
-}
-
-define float @extractelt_nxv2f32_idx(<vscale x 2 x float> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv2f32_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 2 x float> %v, i32 %idx
-  ret float %r
-}
-
-define float @extractelt_nxv4f32_0(<vscale x 4 x float> %v) {
-; CHECK-LABEL: extractelt_nxv4f32_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 4 x float> %v, i32 0
-  ret float %r
-}
-
-define float @extractelt_nxv4f32_imm(<vscale x 4 x float> %v) {
-; CHECK-LABEL: extractelt_nxv4f32_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 4 x float> %v, i32 2
-  ret float %r
-}
-
-define float @extractelt_nxv4f32_idx(<vscale x 4 x float> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv4f32_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 4 x float> %v, i32 %idx
-  ret float %r
-}
-
-define float @extractelt_nxv8f32_0(<vscale x 8 x float> %v) {
-; CHECK-LABEL: extractelt_nxv8f32_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e32, m4, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 8 x float> %v, i32 0
-  ret float %r
-}
-
-define float @extractelt_nxv8f32_imm(<vscale x 8 x float> %v) {
-; CHECK-LABEL: extractelt_nxv8f32_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m4, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 8 x float> %v, i32 2
-  ret float %r
-}
-
-define float @extractelt_nxv8f32_idx(<vscale x 8 x float> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv8f32_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m4, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 8 x float> %v, i32 %idx
-  ret float %r
-}
-
-define float @extractelt_nxv16f32_0(<vscale x 16 x float> %v) {
-; CHECK-LABEL: extractelt_nxv16f32_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e32, m8, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 16 x float> %v, i32 0
-  ret float %r
-}
-
-define float @extractelt_nxv16f32_imm(<vscale x 16 x float> %v) {
-; CHECK-LABEL: extractelt_nxv16f32_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 16 x float> %v, i32 2
-  ret float %r
-}
-
-define float @extractelt_nxv16f32_idx(<vscale x 16 x float> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv16f32_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 16 x float> %v, i32 %idx
-  ret float %r
-}
-
-define double @extractelt_nxv1f64_0(<vscale x 1 x double> %v) {
-; CHECK-LABEL: extractelt_nxv1f64_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 1 x double> %v, i32 0
-  ret double %r
-}
-
-define double @extractelt_nxv1f64_imm(<vscale x 1 x double> %v) {
-; CHECK-LABEL: extractelt_nxv1f64_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 1 x double> %v, i32 2
-  ret double %r
-}
-
-define double @extractelt_nxv1f64_idx(<vscale x 1 x double> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv1f64_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 1 x double> %v, i32 %idx
-  ret double %r
-}
-
-define double @extractelt_nxv2f64_0(<vscale x 2 x double> %v) {
-; CHECK-LABEL: extractelt_nxv2f64_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 2 x double> %v, i32 0
-  ret double %r
-}
-
-define double @extractelt_nxv2f64_imm(<vscale x 2 x double> %v) {
-; CHECK-LABEL: extractelt_nxv2f64_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 2 x double> %v, i32 2
-  ret double %r
-}
-
-define double @extractelt_nxv2f64_idx(<vscale x 2 x double> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv2f64_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 2 x double> %v, i32 %idx
-  ret double %r
-}
-
-define double @extractelt_nxv4f64_0(<vscale x 4 x double> %v) {
-; CHECK-LABEL: extractelt_nxv4f64_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 4 x double> %v, i32 0
-  ret double %r
-}
-
-define double @extractelt_nxv4f64_imm(<vscale x 4 x double> %v) {
-; CHECK-LABEL: extractelt_nxv4f64_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 4 x double> %v, i32 2
-  ret double %r
-}
-
-define double @extractelt_nxv4f64_idx(<vscale x 4 x double> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv4f64_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 4 x double> %v, i32 %idx
-  ret double %r
-}
-
-define double @extractelt_nxv8f64_0(<vscale x 8 x double> %v) {
-; CHECK-LABEL: extractelt_nxv8f64_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 8 x double> %v, i32 0
-  ret double %r
-}
-
-define double @extractelt_nxv8f64_imm(<vscale x 8 x double> %v) {
-; CHECK-LABEL: extractelt_nxv8f64_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 8 x double> %v, i32 2
-  ret double %r
-}
-
-define double @extractelt_nxv8f64_idx(<vscale x 8 x double> %v, i32 %idx) {
-; CHECK-LABEL: extractelt_nxv8f64_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, mu
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    ret
-  %r = extractelement <vscale x 8 x double> %v, i32 %idx
-  ret double %r
-}
-
-define float @extractelt_fadd_nxv4f32_splat(<vscale x 4 x float> %x) {
-; CHECK-LABEL: extractelt_fadd_nxv4f32_splat:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI45_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI45_0)(a0)
-; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s ft1, v8
-; CHECK-NEXT:    fadd.s fa0, ft1, ft0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
-  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = fadd <vscale x 4 x float> %x, %splat
-  %ext = extractelement <vscale x 4 x float> %bo, i32 2
-  ret float %ext
-}
-
-define float @extractelt_fsub_nxv4f32_splat(<vscale x 4 x float> %x) {
-; CHECK-LABEL: extractelt_fsub_nxv4f32_splat:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI46_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI46_0)(a0)
-; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vfmv.f.s ft1, v8
-; CHECK-NEXT:    fsub.s fa0, ft0, ft1
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
-  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = fsub <vscale x 4 x float> %splat, %x
-  %ext = extractelement <vscale x 4 x float> %bo, i32 1
-  ret float %ext
-}
-
-define float @extractelt_fmul_nxv4f32_splat(<vscale x 4 x float> %x) {
-; CHECK-LABEL: extractelt_fmul_nxv4f32_splat:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI47_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI47_0)(a0)
-; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, mu
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vfmv.f.s ft1, v8
-; CHECK-NEXT:    fmul.s fa0, ft1, ft0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
-  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = fmul <vscale x 4 x float> %x, %splat
-  %ext = extractelement <vscale x 4 x float> %bo, i32 3
-  ret float %ext
-}
-
-define float @extractelt_fdiv_nxv4f32_splat(<vscale x 4 x float> %x) {
-; CHECK-LABEL: extractelt_fdiv_nxv4f32_splat:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI48_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI48_0)(a0)
-; CHECK-NEXT:    vsetivli zero, 0, e32, m2, ta, mu
-; CHECK-NEXT:    vfmv.f.s ft1, v8
-; CHECK-NEXT:    fdiv.s fa0, ft1, ft0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
-  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = fdiv <vscale x 4 x float> %x, %splat
-  %ext = extractelement <vscale x 4 x float> %bo, i32 0
-  ret float %ext
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
similarity index 99%
rename from llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
index 64e996b1097fc..bbc9a8af2ab50 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -1,4 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll
deleted file mode 100644
index 0c466174fd455..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll
+++ /dev/null
@@ -1,528 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
-
-define <vscale x 1 x half> @insertelt_nxv1f16_0(<vscale x 1 x half> %v, half %elt) {
-; CHECK-LABEL: insertelt_nxv1f16_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 1 x half> %v, half %elt, i32 0
-  ret <vscale x 1 x half> %r
-}
-
-define <vscale x 1 x half> @insertelt_nxv1f16_imm(<vscale x 1 x half> %v, half %elt) {
-; CHECK-LABEL: insertelt_nxv1f16_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 1 x half> %v, half %elt, i32 3
-  ret <vscale x 1 x half> %r
-}
-
-define <vscale x 1 x half> @insertelt_nxv1f16_idx(<vscale x 1 x half> %v, half %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv1f16_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 1 x half> %v, half %elt, i32 %idx
-  ret <vscale x 1 x half> %r
-}
-
-define <vscale x 2 x half> @insertelt_nxv2f16_0(<vscale x 2 x half> %v, half %elt) {
-; CHECK-LABEL: insertelt_nxv2f16_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 2 x half> %v, half %elt, i32 0
-  ret <vscale x 2 x half> %r
-}
-
-define <vscale x 2 x half> @insertelt_nxv2f16_imm(<vscale x 2 x half> %v, half %elt) {
-; CHECK-LABEL: insertelt_nxv2f16_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 2 x half> %v, half %elt, i32 3
-  ret <vscale x 2 x half> %r
-}
-
-define <vscale x 2 x half> @insertelt_nxv2f16_idx(<vscale x 2 x half> %v, half %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv2f16_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 2 x half> %v, half %elt, i32 %idx
-  ret <vscale x 2 x half> %r
-}
-
-define <vscale x 4 x half> @insertelt_nxv4f16_0(<vscale x 4 x half> %v, half %elt) {
-; CHECK-LABEL: insertelt_nxv4f16_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 4 x half> %v, half %elt, i32 0
-  ret <vscale x 4 x half> %r
-}
-
-define <vscale x 4 x half> @insertelt_nxv4f16_imm(<vscale x 4 x half> %v, half %elt) {
-; CHECK-LABEL: insertelt_nxv4f16_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 4 x half> %v, half %elt, i32 3
-  ret <vscale x 4 x half> %r
-}
-
-define <vscale x 4 x half> @insertelt_nxv4f16_idx(<vscale x 4 x half> %v, half %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv4f16_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 4 x half> %v, half %elt, i32 %idx
-  ret <vscale x 4 x half> %r
-}
-
-define <vscale x 8 x half> @insertelt_nxv8f16_0(<vscale x 8 x half> %v, half %elt) {
-; CHECK-LABEL: insertelt_nxv8f16_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 8 x half> %v, half %elt, i32 0
-  ret <vscale x 8 x half> %r
-}
-
-define <vscale x 8 x half> @insertelt_nxv8f16_imm(<vscale x 8 x half> %v, half %elt) {
-; CHECK-LABEL: insertelt_nxv8f16_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v10, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 8 x half> %v, half %elt, i32 3
-  ret <vscale x 8 x half> %r
-}
-
-define <vscale x 8 x half> @insertelt_nxv8f16_idx(<vscale x 8 x half> %v, half %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv8f16_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 8 x half> %v, half %elt, i32 %idx
-  ret <vscale x 8 x half> %r
-}
-
-define <vscale x 16 x half> @insertelt_nxv16f16_0(<vscale x 16 x half> %v, half %elt) {
-; CHECK-LABEL: insertelt_nxv16f16_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 16 x half> %v, half %elt, i32 0
-  ret <vscale x 16 x half> %r
-}
-
-define <vscale x 16 x half> @insertelt_nxv16f16_imm(<vscale x 16 x half> %v, half %elt) {
-; CHECK-LABEL: insertelt_nxv16f16_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m4, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v12, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 16 x half> %v, half %elt, i32 3
-  ret <vscale x 16 x half> %r
-}
-
-define <vscale x 16 x half> @insertelt_nxv16f16_idx(<vscale x 16 x half> %v, half %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv16f16_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 16 x half> %v, half %elt, i32 %idx
-  ret <vscale x 16 x half> %r
-}
-
-define <vscale x 32 x half> @insertelt_nxv32f16_0(<vscale x 32 x half> %v, half %elt) {
-; CHECK-LABEL: insertelt_nxv32f16_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 32 x half> %v, half %elt, i32 0
-  ret <vscale x 32 x half> %r
-}
-
-define <vscale x 32 x half> @insertelt_nxv32f16_imm(<vscale x 32 x half> %v, half %elt) {
-; CHECK-LABEL: insertelt_nxv32f16_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
-; CHECK-NEXT:    vfmv.s.f v16, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m8, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v16, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 32 x half> %v, half %elt, i32 3
-  ret <vscale x 32 x half> %r
-}
-
-define <vscale x 32 x half> @insertelt_nxv32f16_idx(<vscale x 32 x half> %v, half %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv32f16_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
-; CHECK-NEXT:    vfmv.s.f v16, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 32 x half> %v, half %elt, i32 %idx
-  ret <vscale x 32 x half> %r
-}
-
-define <vscale x 1 x float> @insertelt_nxv1f32_0(<vscale x 1 x float> %v, float %elt) {
-; CHECK-LABEL: insertelt_nxv1f32_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 1 x float> %v, float %elt, i32 0
-  ret <vscale x 1 x float> %r
-}
-
-define <vscale x 1 x float> @insertelt_nxv1f32_imm(<vscale x 1 x float> %v, float %elt) {
-; CHECK-LABEL: insertelt_nxv1f32_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 1 x float> %v, float %elt, i32 3
-  ret <vscale x 1 x float> %r
-}
-
-define <vscale x 1 x float> @insertelt_nxv1f32_idx(<vscale x 1 x float> %v, float %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv1f32_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 1 x float> %v, float %elt, i32 %idx
-  ret <vscale x 1 x float> %r
-}
-
-define <vscale x 2 x float> @insertelt_nxv2f32_0(<vscale x 2 x float> %v, float %elt) {
-; CHECK-LABEL: insertelt_nxv2f32_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 2 x float> %v, float %elt, i32 0
-  ret <vscale x 2 x float> %r
-}
-
-define <vscale x 2 x float> @insertelt_nxv2f32_imm(<vscale x 2 x float> %v, float %elt) {
-; CHECK-LABEL: insertelt_nxv2f32_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 2 x float> %v, float %elt, i32 3
-  ret <vscale x 2 x float> %r
-}
-
-define <vscale x 2 x float> @insertelt_nxv2f32_idx(<vscale x 2 x float> %v, float %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv2f32_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 2 x float> %v, float %elt, i32 %idx
-  ret <vscale x 2 x float> %r
-}
-
-define <vscale x 4 x float> @insertelt_nxv4f32_0(<vscale x 4 x float> %v, float %elt) {
-; CHECK-LABEL: insertelt_nxv4f32_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 4 x float> %v, float %elt, i32 0
-  ret <vscale x 4 x float> %r
-}
-
-define <vscale x 4 x float> @insertelt_nxv4f32_imm(<vscale x 4 x float> %v, float %elt) {
-; CHECK-LABEL: insertelt_nxv4f32_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v10, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 4 x float> %v, float %elt, i32 3
-  ret <vscale x 4 x float> %r
-}
-
-define <vscale x 4 x float> @insertelt_nxv4f32_idx(<vscale x 4 x float> %v, float %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv4f32_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 4 x float> %v, float %elt, i32 %idx
-  ret <vscale x 4 x float> %r
-}
-
-define <vscale x 8 x float> @insertelt_nxv8f32_0(<vscale x 8 x float> %v, float %elt) {
-; CHECK-LABEL: insertelt_nxv8f32_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 8 x float> %v, float %elt, i32 0
-  ret <vscale x 8 x float> %r
-}
-
-define <vscale x 8 x float> @insertelt_nxv8f32_imm(<vscale x 8 x float> %v, float %elt) {
-; CHECK-LABEL: insertelt_nxv8f32_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v12, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 8 x float> %v, float %elt, i32 3
-  ret <vscale x 8 x float> %r
-}
-
-define <vscale x 8 x float> @insertelt_nxv8f32_idx(<vscale x 8 x float> %v, float %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv8f32_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 8 x float> %v, float %elt, i32 %idx
-  ret <vscale x 8 x float> %r
-}
-
-define <vscale x 16 x float> @insertelt_nxv16f32_0(<vscale x 16 x float> %v, float %elt) {
-; CHECK-LABEL: insertelt_nxv16f32_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 16 x float> %v, float %elt, i32 0
-  ret <vscale x 16 x float> %r
-}
-
-define <vscale x 16 x float> @insertelt_nxv16f32_imm(<vscale x 16 x float> %v, float %elt) {
-; CHECK-LABEL: insertelt_nxv16f32_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfmv.s.f v16, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m8, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v16, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 16 x float> %v, float %elt, i32 3
-  ret <vscale x 16 x float> %r
-}
-
-define <vscale x 16 x float> @insertelt_nxv16f32_idx(<vscale x 16 x float> %v, float %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv16f32_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfmv.s.f v16, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 16 x float> %v, float %elt, i32 %idx
-  ret <vscale x 16 x float> %r
-}
-
-define <vscale x 1 x double> @insertelt_nxv1f64_0(<vscale x 1 x double> %v, double %elt) {
-; CHECK-LABEL: insertelt_nxv1f64_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 1 x double> %v, double %elt, i32 0
-  ret <vscale x 1 x double> %r
-}
-
-define <vscale x 1 x double> @insertelt_nxv1f64_imm(<vscale x 1 x double> %v, double %elt) {
-; CHECK-LABEL: insertelt_nxv1f64_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v9, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 1 x double> %v, double %elt, i32 3
-  ret <vscale x 1 x double> %r
-}
-
-define <vscale x 1 x double> @insertelt_nxv1f64_idx(<vscale x 1 x double> %v, double %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv1f64_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 1 x double> %v, double %elt, i32 %idx
-  ret <vscale x 1 x double> %r
-}
-
-define <vscale x 2 x double> @insertelt_nxv2f64_0(<vscale x 2 x double> %v, double %elt) {
-; CHECK-LABEL: insertelt_nxv2f64_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 2 x double> %v, double %elt, i32 0
-  ret <vscale x 2 x double> %r
-}
-
-define <vscale x 2 x double> @insertelt_nxv2f64_imm(<vscale x 2 x double> %v, double %elt) {
-; CHECK-LABEL: insertelt_nxv2f64_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v10, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 2 x double> %v, double %elt, i32 3
-  ret <vscale x 2 x double> %r
-}
-
-define <vscale x 2 x double> @insertelt_nxv2f64_idx(<vscale x 2 x double> %v, double %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv2f64_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 2 x double> %v, double %elt, i32 %idx
-  ret <vscale x 2 x double> %r
-}
-
-define <vscale x 4 x double> @insertelt_nxv4f64_0(<vscale x 4 x double> %v, double %elt) {
-; CHECK-LABEL: insertelt_nxv4f64_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 4 x double> %v, double %elt, i32 0
-  ret <vscale x 4 x double> %r
-}
-
-define <vscale x 4 x double> @insertelt_nxv4f64_imm(<vscale x 4 x double> %v, double %elt) {
-; CHECK-LABEL: insertelt_nxv4f64_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m4, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v12, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 4 x double> %v, double %elt, i32 3
-  ret <vscale x 4 x double> %r
-}
-
-define <vscale x 4 x double> @insertelt_nxv4f64_idx(<vscale x 4 x double> %v, double %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv4f64_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 4 x double> %v, double %elt, i32 %idx
-  ret <vscale x 4 x double> %r
-}
-
-define <vscale x 8 x double> @insertelt_nxv8f64_0(<vscale x 8 x double> %v, double %elt) {
-; CHECK-LABEL: insertelt_nxv8f64_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, fa0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 8 x double> %v, double %elt, i32 0
-  ret <vscale x 8 x double> %r
-}
-
-define <vscale x 8 x double> @insertelt_nxv8f64_imm(<vscale x 8 x double> %v, double %elt) {
-; CHECK-LABEL: insertelt_nxv8f64_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vfmv.s.f v16, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m8, tu, mu
-; CHECK-NEXT:    vslideup.vi v8, v16, 3
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 8 x double> %v, double %elt, i32 3
-  ret <vscale x 8 x double> %r
-}
-
-define <vscale x 8 x double> @insertelt_nxv8f64_idx(<vscale x 8 x double> %v, double %elt, i32 %idx) {
-; CHECK-LABEL: insertelt_nxv8f64_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vfmv.s.f v16, fa0
-; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
-  %r = insertelement <vscale x 8 x double> %v, double %elt, i32 %idx
-  ret <vscale x 8 x double> %r
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll
similarity index 99%
rename from llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll
index ebf7f42983628..5ca8cd3fcf303 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll
@@ -1,4 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 


        


More information about the llvm-commits mailing list