[llvm] 1d85b24 - [RISCV][NFC] Merge RV32/RV64 test checks with a common prefix

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 30 01:51:38 PDT 2021


Author: Fraser Cormack
Date: 2021-04-30T09:43:48+01:00
New Revision: 1d85b247628521f52a9cb8c9a7ccd3ea480a88ec

URL: https://github.com/llvm/llvm-project/commit/1d85b247628521f52a9cb8c9a7ccd3ea480a88ec
DIFF: https://github.com/llvm/llvm-project/commit/1d85b247628521f52a9cb8c9a7ccd3ea480a88ec.diff

LOG: [RISCV][NFC] Merge RV32/RV64 test checks with a common prefix
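
Where the riscv32 and riscv64 runs emit identical code, update_llc_test_checks.py can fold the autogenerated assertions into a single block under a shared CHECK prefix, while functions whose output differs keep separate RV32/RV64 blocks. A minimal sketch of the RUN-line pattern (hypothetical test, not taken from this diff):

  ; RUN: llc -mtriple=riscv32 ... < %s | FileCheck %s --check-prefixes=CHECK,RV32
  ; RUN: llc -mtriple=riscv64 ... < %s | FileCheck %s --check-prefixes=CHECK,RV64

Directives prefixed with CHECK are verified for both runs; directives prefixed with RV32 or RV64 are verified only for the matching run.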

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
index b33265082735..c70aac550c14 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -1,71 +1,44 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 define i8 @extractelt_v16i8(<16 x i8>* %x) nounwind {
-; RV32-LABEL: extractelt_v16i8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
-; RV32-NEXT:    vle8.v v25, (a0)
-; RV32-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
-; RV32-NEXT:    vslidedown.vi v25, v25, 7
-; RV32-NEXT:    vmv.x.s a0, v25
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v16i8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
-; RV64-NEXT:    vle8.v v25, (a0)
-; RV64-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
-; RV64-NEXT:    vslidedown.vi v25, v25, 7
-; RV64-NEXT:    vmv.x.s a0, v25
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 7
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = extractelement <16 x i8> %a, i32 7
   ret i8 %b
 }
 
 define i16 @extractelt_v8i16(<8 x i16>* %x) nounwind {
-; RV32-LABEL: extractelt_v8i16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
-; RV32-NEXT:    vle16.v v25, (a0)
-; RV32-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
-; RV32-NEXT:    vslidedown.vi v25, v25, 7
-; RV32-NEXT:    vmv.x.s a0, v25
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v8i16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
-; RV64-NEXT:    vle16.v v25, (a0)
-; RV64-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
-; RV64-NEXT:    vslidedown.vi v25, v25, 7
-; RV64-NEXT:    vmv.x.s a0, v25
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
+; CHECK-NEXT:    vle16.v v25, (a0)
+; CHECK-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 7
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
   %b = extractelement <8 x i16> %a, i32 7
   ret i16 %b
 }
 
 define i32 @extractelt_v4i32(<4 x i32>* %x) nounwind {
-; RV32-LABEL: extractelt_v4i32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
-; RV32-NEXT:    vle32.v v25, (a0)
-; RV32-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
-; RV32-NEXT:    vslidedown.vi v25, v25, 2
-; RV32-NEXT:    vmv.x.s a0, v25
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v4i32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
-; RV64-NEXT:    vle32.v v25, (a0)
-; RV64-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
-; RV64-NEXT:    vslidedown.vi v25, v25, 2
-; RV64-NEXT:    vmv.x.s a0, v25
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 2
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
   %b = extractelement <4 x i32> %a, i32 2
   ret i32 %b
@@ -95,136 +68,83 @@ define i64 @extractelt_v2i64(<2 x i64>* %x) nounwind {
 }
 
 define half @extractelt_v8f16(<8 x half>* %x) nounwind {
-; RV32-LABEL: extractelt_v8f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
-; RV32-NEXT:    vle16.v v25, (a0)
-; RV32-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
-; RV32-NEXT:    vslidedown.vi v25, v25, 7
-; RV32-NEXT:    vfmv.f.s fa0, v25
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v8f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
-; RV64-NEXT:    vle16.v v25, (a0)
-; RV64-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
-; RV64-NEXT:    vslidedown.vi v25, v25, 7
-; RV64-NEXT:    vfmv.f.s fa0, v25
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
+; CHECK-NEXT:    vle16.v v25, (a0)
+; CHECK-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 7
+; CHECK-NEXT:    vfmv.f.s fa0, v25
+; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = extractelement <8 x half> %a, i32 7
   ret half %b
 }
 
 define float @extractelt_v4f32(<4 x float>* %x) nounwind {
-; RV32-LABEL: extractelt_v4f32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
-; RV32-NEXT:    vle32.v v25, (a0)
-; RV32-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
-; RV32-NEXT:    vslidedown.vi v25, v25, 2
-; RV32-NEXT:    vfmv.f.s fa0, v25
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v4f32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
-; RV64-NEXT:    vle32.v v25, (a0)
-; RV64-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
-; RV64-NEXT:    vslidedown.vi v25, v25, 2
-; RV64-NEXT:    vfmv.f.s fa0, v25
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 2
+; CHECK-NEXT:    vfmv.f.s fa0, v25
+; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
   %b = extractelement <4 x float> %a, i32 2
   ret float %b
 }
 
 define double @extractelt_v2f64(<2 x double>* %x) nounwind {
-; RV32-LABEL: extractelt_v2f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
-; RV32-NEXT:    vle64.v v25, (a0)
-; RV32-NEXT:    vfmv.f.s fa0, v25
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v2f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
-; RV64-NEXT:    vle64.v v25, (a0)
-; RV64-NEXT:    vfmv.f.s fa0, v25
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; CHECK-NEXT:    vle64.v v25, (a0)
+; CHECK-NEXT:    vfmv.f.s fa0, v25
+; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
   %b = extractelement <2 x double> %a, i32 0
   ret double %b
 }
 
 define i8 @extractelt_v32i8(<32 x i8>* %x) nounwind {
-; RV32-LABEL: extractelt_v32i8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 32
-; RV32-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; RV32-NEXT:    vle8.v v26, (a0)
-; RV32-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
-; RV32-NEXT:    vslidedown.vi v26, v26, 7
-; RV32-NEXT:    vmv.x.s a0, v26
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v32i8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 32
-; RV64-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; RV64-NEXT:    vle8.v v26, (a0)
-; RV64-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
-; RV64-NEXT:    vslidedown.vi v26, v26, 7
-; RV64-NEXT:    vmv.x.s a0, v26
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vle8.v v26, (a0)
+; CHECK-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
+; CHECK-NEXT:    vslidedown.vi v26, v26, 7
+; CHECK-NEXT:    vmv.x.s a0, v26
+; CHECK-NEXT:    ret
   %a = load <32 x i8>, <32 x i8>* %x
   %b = extractelement <32 x i8> %a, i32 7
   ret i8 %b
 }
 
 define i16 @extractelt_v16i16(<16 x i16>* %x) nounwind {
-; RV32-LABEL: extractelt_v16i16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
-; RV32-NEXT:    vle16.v v26, (a0)
-; RV32-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
-; RV32-NEXT:    vslidedown.vi v26, v26, 7
-; RV32-NEXT:    vmv.x.s a0, v26
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v16i16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
-; RV64-NEXT:    vle16.v v26, (a0)
-; RV64-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
-; RV64-NEXT:    vslidedown.vi v26, v26, 7
-; RV64-NEXT:    vmv.x.s a0, v26
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
+; CHECK-NEXT:    vle16.v v26, (a0)
+; CHECK-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
+; CHECK-NEXT:    vslidedown.vi v26, v26, 7
+; CHECK-NEXT:    vmv.x.s a0, v26
+; CHECK-NEXT:    ret
   %a = load <16 x i16>, <16 x i16>* %x
   %b = extractelement <16 x i16> %a, i32 7
   ret i16 %b
 }
 
 define i32 @extractelt_v8i32(<8 x i32>* %x) nounwind {
-; RV32-LABEL: extractelt_v8i32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
-; RV32-NEXT:    vle32.v v26, (a0)
-; RV32-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
-; RV32-NEXT:    vslidedown.vi v26, v26, 6
-; RV32-NEXT:    vmv.x.s a0, v26
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v8i32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
-; RV64-NEXT:    vle32.v v26, (a0)
-; RV64-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
-; RV64-NEXT:    vslidedown.vi v26, v26, 6
-; RV64-NEXT:    vmv.x.s a0, v26
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
+; CHECK-NEXT:    vle32.v v26, (a0)
+; CHECK-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
+; CHECK-NEXT:    vslidedown.vi v26, v26, 6
+; CHECK-NEXT:    vmv.x.s a0, v26
+; CHECK-NEXT:    ret
   %a = load <8 x i32>, <8 x i32>* %x
   %b = extractelement <8 x i32> %a, i32 6
   ret i32 %b
@@ -257,65 +177,40 @@ define i64 @extractelt_v4i64(<4 x i64>* %x) nounwind {
 }
 
 define half @extractelt_v16f16(<16 x half>* %x) nounwind {
-; RV32-LABEL: extractelt_v16f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
-; RV32-NEXT:    vle16.v v26, (a0)
-; RV32-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
-; RV32-NEXT:    vslidedown.vi v26, v26, 7
-; RV32-NEXT:    vfmv.f.s fa0, v26
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v16f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
-; RV64-NEXT:    vle16.v v26, (a0)
-; RV64-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
-; RV64-NEXT:    vslidedown.vi v26, v26, 7
-; RV64-NEXT:    vfmv.f.s fa0, v26
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
+; CHECK-NEXT:    vle16.v v26, (a0)
+; CHECK-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
+; CHECK-NEXT:    vslidedown.vi v26, v26, 7
+; CHECK-NEXT:    vfmv.f.s fa0, v26
+; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
   %b = extractelement <16 x half> %a, i32 7
   ret half %b
 }
 
 define float @extractelt_v8f32(<8 x float>* %x) nounwind {
-; RV32-LABEL: extractelt_v8f32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
-; RV32-NEXT:    vle32.v v26, (a0)
-; RV32-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
-; RV32-NEXT:    vslidedown.vi v26, v26, 2
-; RV32-NEXT:    vfmv.f.s fa0, v26
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v8f32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
-; RV64-NEXT:    vle32.v v26, (a0)
-; RV64-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
-; RV64-NEXT:    vslidedown.vi v26, v26, 2
-; RV64-NEXT:    vfmv.f.s fa0, v26
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
+; CHECK-NEXT:    vle32.v v26, (a0)
+; CHECK-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
+; CHECK-NEXT:    vslidedown.vi v26, v26, 2
+; CHECK-NEXT:    vfmv.f.s fa0, v26
+; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
   %b = extractelement <8 x float> %a, i32 2
   ret float %b
 }
 
 define double @extractelt_v4f64(<4 x double>* %x) nounwind {
-; RV32-LABEL: extractelt_v4f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
-; RV32-NEXT:    vle64.v v26, (a0)
-; RV32-NEXT:    vfmv.f.s fa0, v26
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v4f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
-; RV64-NEXT:    vle64.v v26, (a0)
-; RV64-NEXT:    vfmv.f.s fa0, v26
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
+; CHECK-NEXT:    vle64.v v26, (a0)
+; CHECK-NEXT:    vfmv.f.s fa0, v26
+; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
   %b = extractelement <4 x double> %a, i32 0
   ret double %b
@@ -351,71 +246,43 @@ define i64 @extractelt_v3i64(<3 x i64>* %x) nounwind {
 }
 
 define i8 @extractelt_v16i8_idx(<16 x i8>* %x, i32 signext %idx) nounwind {
-; RV32-LABEL: extractelt_v16i8_idx:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
-; RV32-NEXT:    vle8.v v25, (a0)
-; RV32-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
-; RV32-NEXT:    vslidedown.vx v25, v25, a1
-; RV32-NEXT:    vmv.x.s a0, v25
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v16i8_idx:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
-; RV64-NEXT:    vle8.v v25, (a0)
-; RV64-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
-; RV64-NEXT:    vslidedown.vx v25, v25, a1
-; RV64-NEXT:    vmv.x.s a0, v25
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v16i8_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = extractelement <16 x i8> %a, i32 %idx
   ret i8 %b
 }
 
 define i16 @extractelt_v8i16_idx(<8 x i16>* %x, i32 signext %idx) nounwind {
-; RV32-LABEL: extractelt_v8i16_idx:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
-; RV32-NEXT:    vle16.v v25, (a0)
-; RV32-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
-; RV32-NEXT:    vslidedown.vx v25, v25, a1
-; RV32-NEXT:    vmv.x.s a0, v25
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v8i16_idx:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
-; RV64-NEXT:    vle16.v v25, (a0)
-; RV64-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
-; RV64-NEXT:    vslidedown.vx v25, v25, a1
-; RV64-NEXT:    vmv.x.s a0, v25
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v8i16_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
+; CHECK-NEXT:    vle16.v v25, (a0)
+; CHECK-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
   %b = extractelement <8 x i16> %a, i32 %idx
   ret i16 %b
 }
 
 define i32 @extractelt_v4i32_idx(<4 x i32>* %x, i32 signext %idx) nounwind {
-; RV32-LABEL: extractelt_v4i32_idx:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
-; RV32-NEXT:    vle32.v v25, (a0)
-; RV32-NEXT:    vadd.vv v25, v25, v25
-; RV32-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
-; RV32-NEXT:    vslidedown.vx v25, v25, a1
-; RV32-NEXT:    vmv.x.s a0, v25
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v4i32_idx:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
-; RV64-NEXT:    vle32.v v25, (a0)
-; RV64-NEXT:    vadd.vv v25, v25, v25
-; RV64-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
-; RV64-NEXT:    vslidedown.vx v25, v25, a1
-; RV64-NEXT:    vmv.x.s a0, v25
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v4i32_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vadd.vv v25, v25, v25
+; CHECK-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
   %b = add <4 x i32> %a, %a
   %c = extractelement <4 x i32> %b, i32 %idx
@@ -452,25 +319,15 @@ define i64 @extractelt_v2i64_idx(<2 x i64>* %x, i32 signext %idx) nounwind {
 }
 
 define half @extractelt_v8f16_idx(<8 x half>* %x, i32 signext %idx) nounwind {
-; RV32-LABEL: extractelt_v8f16_idx:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
-; RV32-NEXT:    vle16.v v25, (a0)
-; RV32-NEXT:    vfadd.vv v25, v25, v25
-; RV32-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
-; RV32-NEXT:    vslidedown.vx v25, v25, a1
-; RV32-NEXT:    vfmv.f.s fa0, v25
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v8f16_idx:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
-; RV64-NEXT:    vle16.v v25, (a0)
-; RV64-NEXT:    vfadd.vv v25, v25, v25
-; RV64-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
-; RV64-NEXT:    vslidedown.vx v25, v25, a1
-; RV64-NEXT:    vfmv.f.s fa0, v25
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v8f16_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
+; CHECK-NEXT:    vle16.v v25, (a0)
+; CHECK-NEXT:    vfadd.vv v25, v25, v25
+; CHECK-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vfmv.f.s fa0, v25
+; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = fadd <8 x half> %a, %a
   %c = extractelement <8 x half> %b, i32 %idx
@@ -478,25 +335,15 @@ define half @extractelt_v8f16_idx(<8 x half>* %x, i32 signext %idx) nounwind {
 }
 
 define float @extractelt_v4f32_idx(<4 x float>* %x, i32 signext %idx) nounwind {
-; RV32-LABEL: extractelt_v4f32_idx:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
-; RV32-NEXT:    vle32.v v25, (a0)
-; RV32-NEXT:    vfadd.vv v25, v25, v25
-; RV32-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
-; RV32-NEXT:    vslidedown.vx v25, v25, a1
-; RV32-NEXT:    vfmv.f.s fa0, v25
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v4f32_idx:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
-; RV64-NEXT:    vle32.v v25, (a0)
-; RV64-NEXT:    vfadd.vv v25, v25, v25
-; RV64-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
-; RV64-NEXT:    vslidedown.vx v25, v25, a1
-; RV64-NEXT:    vfmv.f.s fa0, v25
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v4f32_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vfadd.vv v25, v25, v25
+; CHECK-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vfmv.f.s fa0, v25
+; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
   %b = fadd <4 x float> %a, %a
   %c = extractelement <4 x float> %b, i32 %idx
@@ -504,25 +351,15 @@ define float @extractelt_v4f32_idx(<4 x float>* %x, i32 signext %idx) nounwind {
 }
 
 define double @extractelt_v2f64_idx(<2 x double>* %x, i32 signext %idx) nounwind {
-; RV32-LABEL: extractelt_v2f64_idx:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
-; RV32-NEXT:    vle64.v v25, (a0)
-; RV32-NEXT:    vfadd.vv v25, v25, v25
-; RV32-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
-; RV32-NEXT:    vslidedown.vx v25, v25, a1
-; RV32-NEXT:    vfmv.f.s fa0, v25
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v2f64_idx:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
-; RV64-NEXT:    vle64.v v25, (a0)
-; RV64-NEXT:    vfadd.vv v25, v25, v25
-; RV64-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
-; RV64-NEXT:    vslidedown.vx v25, v25, a1
-; RV64-NEXT:    vfmv.f.s fa0, v25
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v2f64_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
+; CHECK-NEXT:    vle64.v v25, (a0)
+; CHECK-NEXT:    vfadd.vv v25, v25, v25
+; CHECK-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vfmv.f.s fa0, v25
+; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
   %b = fadd <2 x double> %a, %a
   %c = extractelement <2 x double> %b, i32 %idx
@@ -530,73 +367,44 @@ define double @extractelt_v2f64_idx(<2 x double>* %x, i32 signext %idx) nounwind
 }
 
 define i8 @extractelt_v32i8_idx(<32 x i8>* %x, i32 signext %idx) nounwind {
-; RV32-LABEL: extractelt_v32i8_idx:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi a2, zero, 32
-; RV32-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
-; RV32-NEXT:    vle8.v v26, (a0)
-; RV32-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
-; RV32-NEXT:    vslidedown.vx v26, v26, a1
-; RV32-NEXT:    vmv.x.s a0, v26
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v32i8_idx:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi a2, zero, 32
-; RV64-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
-; RV64-NEXT:    vle8.v v26, (a0)
-; RV64-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
-; RV64-NEXT:    vslidedown.vx v26, v26, a1
-; RV64-NEXT:    vmv.x.s a0, v26
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v32i8_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vle8.v v26, (a0)
+; CHECK-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
+; CHECK-NEXT:    vslidedown.vx v26, v26, a1
+; CHECK-NEXT:    vmv.x.s a0, v26
+; CHECK-NEXT:    ret
   %a = load <32 x i8>, <32 x i8>* %x
   %b = extractelement <32 x i8> %a, i32 %idx
   ret i8 %b
 }
 
 define i16 @extractelt_v16i16_idx(<16 x i16>* %x, i32 signext %idx) nounwind {
-; RV32-LABEL: extractelt_v16i16_idx:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
-; RV32-NEXT:    vle16.v v26, (a0)
-; RV32-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
-; RV32-NEXT:    vslidedown.vx v26, v26, a1
-; RV32-NEXT:    vmv.x.s a0, v26
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v16i16_idx:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
-; RV64-NEXT:    vle16.v v26, (a0)
-; RV64-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
-; RV64-NEXT:    vslidedown.vx v26, v26, a1
-; RV64-NEXT:    vmv.x.s a0, v26
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v16i16_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
+; CHECK-NEXT:    vle16.v v26, (a0)
+; CHECK-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
+; CHECK-NEXT:    vslidedown.vx v26, v26, a1
+; CHECK-NEXT:    vmv.x.s a0, v26
+; CHECK-NEXT:    ret
   %a = load <16 x i16>, <16 x i16>* %x
   %b = extractelement <16 x i16> %a, i32 %idx
   ret i16 %b
 }
 
 define i32 @extractelt_v8i32_idx(<8 x i32>* %x, i32 signext %idx) nounwind {
-; RV32-LABEL: extractelt_v8i32_idx:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
-; RV32-NEXT:    vle32.v v26, (a0)
-; RV32-NEXT:    vadd.vv v26, v26, v26
-; RV32-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
-; RV32-NEXT:    vslidedown.vx v26, v26, a1
-; RV32-NEXT:    vmv.x.s a0, v26
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v8i32_idx:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
-; RV64-NEXT:    vle32.v v26, (a0)
-; RV64-NEXT:    vadd.vv v26, v26, v26
-; RV64-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
-; RV64-NEXT:    vslidedown.vx v26, v26, a1
-; RV64-NEXT:    vmv.x.s a0, v26
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v8i32_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
+; CHECK-NEXT:    vle32.v v26, (a0)
+; CHECK-NEXT:    vadd.vv v26, v26, v26
+; CHECK-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
+; CHECK-NEXT:    vslidedown.vx v26, v26, a1
+; CHECK-NEXT:    vmv.x.s a0, v26
+; CHECK-NEXT:    ret
   %a = load <8 x i32>, <8 x i32>* %x
   %b = add <8 x i32> %a, %a
   %c = extractelement <8 x i32> %b, i32 %idx
@@ -633,25 +441,15 @@ define i64 @extractelt_v4i64_idx(<4 x i64>* %x, i32 signext %idx) nounwind {
 }
 
 define half @extractelt_v16f16_idx(<16 x half>* %x, i32 signext %idx) nounwind {
-; RV32-LABEL: extractelt_v16f16_idx:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
-; RV32-NEXT:    vle16.v v26, (a0)
-; RV32-NEXT:    vfadd.vv v26, v26, v26
-; RV32-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
-; RV32-NEXT:    vslidedown.vx v26, v26, a1
-; RV32-NEXT:    vfmv.f.s fa0, v26
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v16f16_idx:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
-; RV64-NEXT:    vle16.v v26, (a0)
-; RV64-NEXT:    vfadd.vv v26, v26, v26
-; RV64-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
-; RV64-NEXT:    vslidedown.vx v26, v26, a1
-; RV64-NEXT:    vfmv.f.s fa0, v26
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v16f16_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
+; CHECK-NEXT:    vle16.v v26, (a0)
+; CHECK-NEXT:    vfadd.vv v26, v26, v26
+; CHECK-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
+; CHECK-NEXT:    vslidedown.vx v26, v26, a1
+; CHECK-NEXT:    vfmv.f.s fa0, v26
+; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
   %b = fadd <16 x half> %a, %a
   %c = extractelement <16 x half> %b, i32 %idx
@@ -659,25 +457,15 @@ define half @extractelt_v16f16_idx(<16 x half>* %x, i32 signext %idx) nounwind {
 }
 
 define float @extractelt_v8f32_idx(<8 x float>* %x, i32 signext %idx) nounwind {
-; RV32-LABEL: extractelt_v8f32_idx:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
-; RV32-NEXT:    vle32.v v26, (a0)
-; RV32-NEXT:    vfadd.vv v26, v26, v26
-; RV32-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
-; RV32-NEXT:    vslidedown.vx v26, v26, a1
-; RV32-NEXT:    vfmv.f.s fa0, v26
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v8f32_idx:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
-; RV64-NEXT:    vle32.v v26, (a0)
-; RV64-NEXT:    vfadd.vv v26, v26, v26
-; RV64-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
-; RV64-NEXT:    vslidedown.vx v26, v26, a1
-; RV64-NEXT:    vfmv.f.s fa0, v26
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v8f32_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
+; CHECK-NEXT:    vle32.v v26, (a0)
+; CHECK-NEXT:    vfadd.vv v26, v26, v26
+; CHECK-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
+; CHECK-NEXT:    vslidedown.vx v26, v26, a1
+; CHECK-NEXT:    vfmv.f.s fa0, v26
+; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
   %b = fadd <8 x float> %a, %a
   %c = extractelement <8 x float> %b, i32 %idx
@@ -685,25 +473,15 @@ define float @extractelt_v8f32_idx(<8 x float>* %x, i32 signext %idx) nounwind {
 }
 
 define double @extractelt_v4f64_idx(<4 x double>* %x, i32 signext %idx) nounwind {
-; RV32-LABEL: extractelt_v4f64_idx:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
-; RV32-NEXT:    vle64.v v26, (a0)
-; RV32-NEXT:    vfadd.vv v26, v26, v26
-; RV32-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
-; RV32-NEXT:    vslidedown.vx v26, v26, a1
-; RV32-NEXT:    vfmv.f.s fa0, v26
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: extractelt_v4f64_idx:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
-; RV64-NEXT:    vle64.v v26, (a0)
-; RV64-NEXT:    vfadd.vv v26, v26, v26
-; RV64-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
-; RV64-NEXT:    vslidedown.vx v26, v26, a1
-; RV64-NEXT:    vfmv.f.s fa0, v26
-; RV64-NEXT:    ret
+; CHECK-LABEL: extractelt_v4f64_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
+; CHECK-NEXT:    vle64.v v26, (a0)
+; CHECK-NEXT:    vfadd.vv v26, v26, v26
+; CHECK-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
+; CHECK-NEXT:    vslidedown.vx v26, v26, a1
+; CHECK-NEXT:    vfmv.f.s fa0, v26
+; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
   %b = fadd <4 x double> %a, %a
   %c = extractelement <4 x double> %b, i32 %idx

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
index 2c2b642f484c..7ff6a4d6b734 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 ; FIXME: This codegen needs to be improved. These tests previously asserted
 ; type legalizing the i64 type on RV32.
@@ -79,27 +79,16 @@ define void @insertelt_v3i64(<3 x i64>* %x, i64 %y) {
 }
 
 define void @insertelt_v16i8(<16 x i8>* %x, i8 %y) {
-; RV32-LABEL: insertelt_v16i8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
-; RV32-NEXT:    vle8.v v25, (a0)
-; RV32-NEXT:    vmv.s.x v26, a1
-; RV32-NEXT:    vsetivli a1, 15, e8,m1,tu,mu
-; RV32-NEXT:    vslideup.vi v25, v26, 14
-; RV32-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
-; RV32-NEXT:    vse8.v v25, (a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: insertelt_v16i8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
-; RV64-NEXT:    vle8.v v25, (a0)
-; RV64-NEXT:    vmv.s.x v26, a1
-; RV64-NEXT:    vsetivli a1, 15, e8,m1,tu,mu
-; RV64-NEXT:    vslideup.vi v25, v26, 14
-; RV64-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
-; RV64-NEXT:    vse8.v v25, (a0)
-; RV64-NEXT:    ret
+; CHECK-LABEL: insertelt_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vmv.s.x v26, a1
+; CHECK-NEXT:    vsetivli a1, 15, e8,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v25, v26, 14
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
+; CHECK-NEXT:    vse8.v v25, (a0)
+; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = insertelement <16 x i8> %a, i8 %y, i32 14
   store <16 x i8> %b, <16 x i8>* %x
@@ -171,23 +160,14 @@ define void @insertelt_v8f32(<8 x float>* %x, float %y, i32 %idx) {
 }
 
 define void @insertelt_v8i64_0(<8 x i64>* %x) {
-; RV32-LABEL: insertelt_v8i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a1, 8, e64,m4,ta,mu
-; RV32-NEXT:    vle64.v v28, (a0)
-; RV32-NEXT:    addi a1, zero, -1
-; RV32-NEXT:    vmv.s.x v28, a1
-; RV32-NEXT:    vse64.v v28, (a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: insertelt_v8i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a1, 8, e64,m4,ta,mu
-; RV64-NEXT:    vle64.v v28, (a0)
-; RV64-NEXT:    addi a1, zero, -1
-; RV64-NEXT:    vmv.s.x v28, a1
-; RV64-NEXT:    vse64.v v28, (a0)
-; RV64-NEXT:    ret
+; CHECK-LABEL: insertelt_v8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 8, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    addi a1, zero, -1
+; CHECK-NEXT:    vmv.s.x v28, a1
+; CHECK-NEXT:    vse64.v v28, (a0)
+; CHECK-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
   %b = insertelement <8 x i64> %a, i64 -1, i32 0
   store <8 x i64> %b, <8 x i64>* %x
@@ -228,23 +208,14 @@ define void @insertelt_v8i64(<8 x i64>* %x, i32 %idx) {
 }
 
 define void @insertelt_c6_v8i64_0(<8 x i64>* %x) {
-; RV32-LABEL: insertelt_c6_v8i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a1, 8, e64,m4,ta,mu
-; RV32-NEXT:    vle64.v v28, (a0)
-; RV32-NEXT:    addi a1, zero, 6
-; RV32-NEXT:    vmv.s.x v28, a1
-; RV32-NEXT:    vse64.v v28, (a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: insertelt_c6_v8i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a1, 8, e64,m4,ta,mu
-; RV64-NEXT:    vle64.v v28, (a0)
-; RV64-NEXT:    addi a1, zero, 6
-; RV64-NEXT:    vmv.s.x v28, a1
-; RV64-NEXT:    vse64.v v28, (a0)
-; RV64-NEXT:    ret
+; CHECK-LABEL: insertelt_c6_v8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 8, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    addi a1, zero, 6
+; CHECK-NEXT:    vmv.s.x v28, a1
+; CHECK-NEXT:    vse64.v v28, (a0)
+; CHECK-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
   %b = insertelement <8 x i64> %a, i64 6, i32 0
   store <8 x i64> %b, <8 x i64>* %x
@@ -287,27 +258,16 @@ define void @insertelt_c6_v8i64(<8 x i64>* %x, i32 %idx) {
 ; Test that using a insertelement at element 0 by a later operation doesn't
 ; crash the compiler.
 define void @insertelt_c6_v8i64_0_add(<8 x i64>* %x, <8 x i64>* %y) {
-; RV32-LABEL: insertelt_c6_v8i64_0_add:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli a2, 8, e64,m4,ta,mu
-; RV32-NEXT:    vle64.v v28, (a0)
-; RV32-NEXT:    vle64.v v8, (a1)
-; RV32-NEXT:    addi a1, zero, 6
-; RV32-NEXT:    vmv.s.x v28, a1
-; RV32-NEXT:    vadd.vv v28, v28, v8
-; RV32-NEXT:    vse64.v v28, (a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: insertelt_c6_v8i64_0_add:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli a2, 8, e64,m4,ta,mu
-; RV64-NEXT:    vle64.v v28, (a0)
-; RV64-NEXT:    vle64.v v8, (a1)
-; RV64-NEXT:    addi a1, zero, 6
-; RV64-NEXT:    vmv.s.x v28, a1
-; RV64-NEXT:    vadd.vv v28, v28, v8
-; RV64-NEXT:    vse64.v v28, (a0)
-; RV64-NEXT:    ret
+; CHECK-LABEL: insertelt_c6_v8i64_0_add:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 8, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    addi a1, zero, 6
+; CHECK-NEXT:    vmv.s.x v28, a1
+; CHECK-NEXT:    vadd.vv v28, v28, v8
+; CHECK-NEXT:    vse64.v v28, (a0)
+; CHECK-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
   %b = insertelement <8 x i64> %a, i64 6, i32 0
   %c = load <8 x i64>, <8 x i64>* %y


        

