[llvm] f863df9 - [RISCV][NFC] Add common check prefix to reduce duplicate check lines.

via llvm-commits <llvm-commits at lists.llvm.org>
Mon Mar 21 20:07:15 PDT 2022


Author: jacquesguan
Date: 2022-03-22T11:06:52+08:00
New Revision: f863df9a051095191d9ff63fbf97a12c80cd2c54

URL: https://github.com/llvm/llvm-project/commit/f863df9a051095191d9ff63fbf97a12c80cd2c54
DIFF: https://github.com/llvm/llvm-project/commit/f863df9a051095191d9ff63fbf97a12c80cd2c54.diff

LOG: [RISCV][NFC] Add common check prefix to reduce duplicate check lines.

Differential Revision: https://reviews.llvm.org/D122120
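
For reference, the mechanism behind this cleanup is FileCheck's support for
multiple prefixes per RUN line: with --check-prefixes=CHECK,RV32 and
--check-prefixes=CHECK,RV64, assembly that is identical for riscv32 and
riscv64 can be checked once under the shared CHECK prefix, while any
target-specific output keeps separate RV32/RV64 check lines. A minimal
sketch of the pattern (illustrative only; the function and its expected
assembly are hypothetical and not taken from this commit):

    ; RUN: llc -mtriple=riscv32 -mattr=+v < %s | FileCheck %s --check-prefixes=CHECK,RV32
    ; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s --check-prefixes=CHECK,RV64

    ; Hypothetical example: both targets lower this to a plain return,
    ; so a single CHECK block covers RV32 and RV64.
    define <vscale x 1 x i8> @identity(<vscale x 1 x i8> %x) {
    ; CHECK-LABEL: identity:
    ; CHECK:       # %bb.0:
    ; CHECK-NEXT:    ret
      ret <vscale x 1 x i8> %x
    }

When the checks are regenerated with utils/update_llc_test_checks.py, the
script emits a shared CHECK block whenever the output matches for every RUN
line carrying that prefix, which is what removes the duplicated RV32/RV64
blocks in the files below.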

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
    llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
    llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
    llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
    llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index 39fbdf32e6a52..2d0436d3b8dcb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -1,32 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s --check-prefix=RV32
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s --check-prefix=RV64
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 ; Check that we correctly scale the split part indirect offsets by VSCALE.
 define <vscale x 32 x i32> @callee_scalable_vector_split_indirect(<vscale x 32 x i32> %x, <vscale x 32 x i32> %y) {
-; RV32-LABEL: callee_scalable_vector_split_indirect:
-; RV32:       # %bb.0:
-; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a1, a1, 3
-; RV32-NEXT:    add a1, a0, a1
-; RV32-NEXT:    vl8re32.v v24, (a0)
-; RV32-NEXT:    vl8re32.v v0, (a1)
-; RV32-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
-; RV32-NEXT:    vadd.vv v8, v8, v24
-; RV32-NEXT:    vadd.vv v16, v16, v0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: callee_scalable_vector_split_indirect:
-; RV64:       # %bb.0:
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 3
-; RV64-NEXT:    add a1, a0, a1
-; RV64-NEXT:    vl8re32.v v24, (a0)
-; RV64-NEXT:    vl8re32.v v0, (a1)
-; RV64-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
-; RV64-NEXT:    vadd.vv v8, v8, v24
-; RV64-NEXT:    vadd.vv v16, v16, v0
-; RV64-NEXT:    ret
+; CHECK-LABEL: callee_scalable_vector_split_indirect:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, a0, a1
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vl8re32.v v0, (a1)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v24
+; CHECK-NEXT:    vadd.vv v16, v16, v0
+; CHECK-NEXT:    ret
   %a = add <vscale x 32 x i32> %x, %y
   ret <vscale x 32 x i32> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 5734dcaab8728..ca45a3d09d291 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
-; RUN:     -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
+; RUN:     -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
-; RUN:     -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
+; RUN:     -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare <1 x i8> @llvm.masked.gather.v1i8.v1p0i8(<1 x i8*>, i32, <1 x i1>, <1 x i8>)
 
@@ -973,15 +973,10 @@ define <4 x i64> @mgather_truemask_v4i64(<4 x i64*> %ptrs, <4 x i64> %passthru)
 }
 
 define <4 x i64> @mgather_falsemask_v4i64(<4 x i64*> %ptrs, <4 x i64> %passthru) {
-; RV32-LABEL: mgather_falsemask_v4i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vmv2r.v v8, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mgather_falsemask_v4i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vmv2r.v v8, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: mgather_falsemask_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
   %v = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> %ptrs, i32 8, <4 x i1> zeroinitializer, <4 x i64> %passthru)
   ret <4 x i64> %v
 }
@@ -1843,15 +1838,10 @@ define <4 x double> @mgather_truemask_v4f64(<4 x double*> %ptrs, <4 x double> %p
 }
 
 define <4 x double> @mgather_falsemask_v4f64(<4 x double*> %ptrs, <4 x double> %passthru) {
-; RV32-LABEL: mgather_falsemask_v4f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vmv2r.v v8, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mgather_falsemask_v4f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vmv2r.v v8, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: mgather_falsemask_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
   %v = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> %ptrs, i32 8, <4 x i1> zeroinitializer, <4 x double> %passthru)
   ret <4 x double> %v
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index c19ca814d7457..000f1a397b4d9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
-; RUN:     -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
+; RUN:     -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
-; RUN:     -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
+; RUN:     -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare void @llvm.masked.scatter.v1i8.v1p0i8(<1 x i8>, <1 x i8*>, i32, <1 x i1>)
 
@@ -146,13 +146,9 @@ define void @mscatter_truemask_v4i8(<4 x i8> %val, <4 x i8*> %ptrs) {
 }
 
 define void @mscatter_falsemask_v4i8(<4 x i8> %val, <4 x i8*> %ptrs) {
-; RV32-LABEL: mscatter_falsemask_v4i8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mscatter_falsemask_v4i8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    ret
+; CHECK-LABEL: mscatter_falsemask_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
   call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %val, <4 x i8*> %ptrs, i32 1, <4 x i1> zeroinitializer)
   ret void
 }
@@ -311,13 +307,9 @@ define void @mscatter_truemask_v4i16(<4 x i16> %val, <4 x i16*> %ptrs) {
 }
 
 define void @mscatter_falsemask_v4i16(<4 x i16> %val, <4 x i16*> %ptrs) {
-; RV32-LABEL: mscatter_falsemask_v4i16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mscatter_falsemask_v4i16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    ret
+; CHECK-LABEL: mscatter_falsemask_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
   call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %val, <4 x i16*> %ptrs, i32 2, <4 x i1> zeroinitializer)
   ret void
 }
@@ -526,13 +518,9 @@ define void @mscatter_truemask_v4i32(<4 x i32> %val, <4 x i32*> %ptrs) {
 }
 
 define void @mscatter_falsemask_v4i32(<4 x i32> %val, <4 x i32*> %ptrs) {
-; RV32-LABEL: mscatter_falsemask_v4i32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mscatter_falsemask_v4i32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    ret
+; CHECK-LABEL: mscatter_falsemask_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
   call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %val, <4 x i32*> %ptrs, i32 4, <4 x i1> zeroinitializer)
   ret void
 }
@@ -785,13 +773,9 @@ define void @mscatter_truemask_v4i64(<4 x i64> %val, <4 x i64*> %ptrs) {
 }
 
 define void @mscatter_falsemask_v4i64(<4 x i64> %val, <4 x i64*> %ptrs) {
-; RV32-LABEL: mscatter_falsemask_v4i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mscatter_falsemask_v4i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    ret
+; CHECK-LABEL: mscatter_falsemask_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
   call void @llvm.masked.scatter.v4i64.v4p0i64(<4 x i64> %val, <4 x i64*> %ptrs, i32 8, <4 x i1> zeroinitializer)
   ret void
 }
@@ -1124,13 +1108,9 @@ define void @mscatter_truemask_v4f16(<4 x half> %val, <4 x half*> %ptrs) {
 }
 
 define void @mscatter_falsemask_v4f16(<4 x half> %val, <4 x half*> %ptrs) {
-; RV32-LABEL: mscatter_falsemask_v4f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mscatter_falsemask_v4f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    ret
+; CHECK-LABEL: mscatter_falsemask_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
   call void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half> %val, <4 x half*> %ptrs, i32 2, <4 x i1> zeroinitializer)
   ret void
 }
@@ -1320,13 +1300,9 @@ define void @mscatter_truemask_v4f32(<4 x float> %val, <4 x float*> %ptrs) {
 }
 
 define void @mscatter_falsemask_v4f32(<4 x float> %val, <4 x float*> %ptrs) {
-; RV32-LABEL: mscatter_falsemask_v4f32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mscatter_falsemask_v4f32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    ret
+; CHECK-LABEL: mscatter_falsemask_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
   call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %val, <4 x float*> %ptrs, i32 4, <4 x i1> zeroinitializer)
   ret void
 }
@@ -1579,13 +1555,9 @@ define void @mscatter_truemask_v4f64(<4 x double> %val, <4 x double*> %ptrs) {
 }
 
 define void @mscatter_falsemask_v4f64(<4 x double> %val, <4 x double*> %ptrs) {
-; RV32-LABEL: mscatter_falsemask_v4f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mscatter_falsemask_v4f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    ret
+; CHECK-LABEL: mscatter_falsemask_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
   call void @llvm.masked.scatter.v4f64.v4p0f64(<4 x double> %val, <4 x double*> %ptrs, i32 8, <4 x i1> zeroinitializer)
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index d6d1e20b7013e..9c4729b65e449 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -1,69 +1,45 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefix=RV32
+; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefix=RV64
+; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
 define <4 x i32> @load_v4i32_align1(<4 x i32>* %ptr) {
-; RV32-LABEL: load_v4i32_align1:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
-; RV32-NEXT:    vle8.v v8, (a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: load_v4i32_align1:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
-; RV64-NEXT:    vle8.v v8, (a0)
-; RV64-NEXT:    ret
+; CHECK-LABEL: load_v4i32_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
   %z = load <4 x i32>, <4 x i32>* %ptr, align 1
   ret <4 x i32> %z
 }
 
 define <4 x i32> @load_v4i32_align2(<4 x i32>* %ptr) {
-; RV32-LABEL: load_v4i32_align2:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
-; RV32-NEXT:    vle8.v v8, (a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: load_v4i32_align2:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
-; RV64-NEXT:    vle8.v v8, (a0)
-; RV64-NEXT:    ret
+; CHECK-LABEL: load_v4i32_align2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
   %z = load <4 x i32>, <4 x i32>* %ptr, align 2
   ret <4 x i32> %z
 }
 
 define void @store_v4i32_align1(<4 x i32> %x, <4 x i32>* %ptr) {
-; RV32-LABEL: store_v4i32_align1:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
-; RV32-NEXT:    vse8.v v8, (a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: store_v4i32_align1:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
-; RV64-NEXT:    vse8.v v8, (a0)
-; RV64-NEXT:    ret
+; CHECK-LABEL: store_v4i32_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
   store <4 x i32> %x, <4 x i32>* %ptr, align 1
   ret void
 }
 
 define void @store_v4i32_align2(<4 x i32> %x, <4 x i32>* %ptr) {
-; RV32-LABEL: store_v4i32_align2:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
-; RV32-NEXT:    vse8.v v8, (a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: store_v4i32_align2:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
-; RV64-NEXT:    vse8.v v8, (a0)
-; RV64-NEXT:    ret
+; CHECK-LABEL: store_v4i32_align2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
   store <4 x i32> %x, <4 x i32>* %ptr, align 2
   ret void
 }
@@ -656,91 +632,48 @@ define void @masked_load_v2i32_align1(<2 x i32>* %a, <2 x i32> %m, <2 x i32>* %r
 declare void @llvm.masked.store.v2i32.p0v2i32(<2 x i32>, <2 x i32>*, i32, <2 x i1>)
 
 define void @masked_store_v2i32_align2(<2 x i32> %val, <2 x i32>* %a, <2 x i32> %m) nounwind {
-; RV32-LABEL: masked_store_v2i32_align2:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT:    vmseq.vi v0, v9, 0
-; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT:    vmv.v.i v9, 0
-; RV32-NEXT:    vmerge.vim v9, v9, 1, v0
-; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; RV32-NEXT:    vmv.v.i v10, 0
-; RV32-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
-; RV32-NEXT:    vslideup.vi v10, v9, 0
-; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; RV32-NEXT:    vmsne.vi v9, v10, 0
-; RV32-NEXT:    addi a1, sp, 15
-; RV32-NEXT:    vsm.v v9, (a1)
-; RV32-NEXT:    lbu a1, 15(sp)
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB9_3
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a1, a1, 2
-; RV32-NEXT:    bnez a1, .LBB9_4
-; RV32-NEXT:  .LBB9_2: # %else2
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB9_3: # %cond.store
-; RV32-NEXT:    vsetivli zero, 0, e32, mf2, ta, mu
-; RV32-NEXT:    vmv.x.s a2, v8
-; RV32-NEXT:    sh a2, 0(a0)
-; RV32-NEXT:    srli a2, a2, 16
-; RV32-NEXT:    sh a2, 2(a0)
-; RV32-NEXT:    andi a1, a1, 2
-; RV32-NEXT:    beqz a1, .LBB9_2
-; RV32-NEXT:  .LBB9_4: # %cond.store1
-; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vmv.x.s a1, v8
-; RV32-NEXT:    sh a1, 4(a0)
-; RV32-NEXT:    srli a1, a1, 16
-; RV32-NEXT:    sh a1, 6(a0)
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: masked_store_v2i32_align2:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT:    vmseq.vi v0, v9, 0
-; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT:    vmv.v.i v9, 0
-; RV64-NEXT:    vmerge.vim v9, v9, 1, v0
-; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; RV64-NEXT:    vmv.v.i v10, 0
-; RV64-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
-; RV64-NEXT:    vslideup.vi v10, v9, 0
-; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; RV64-NEXT:    vmsne.vi v9, v10, 0
-; RV64-NEXT:    addi a1, sp, 15
-; RV64-NEXT:    vsm.v v9, (a1)
-; RV64-NEXT:    lbu a1, 15(sp)
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB9_3
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a1, a1, 2
-; RV64-NEXT:    bnez a1, .LBB9_4
-; RV64-NEXT:  .LBB9_2: # %else2
-; RV64-NEXT:    addi sp, sp, 16
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB9_3: # %cond.store
-; RV64-NEXT:    vsetivli zero, 0, e32, mf2, ta, mu
-; RV64-NEXT:    vmv.x.s a2, v8
-; RV64-NEXT:    sh a2, 0(a0)
-; RV64-NEXT:    srli a2, a2, 16
-; RV64-NEXT:    sh a2, 2(a0)
-; RV64-NEXT:    andi a1, a1, 2
-; RV64-NEXT:    beqz a1, .LBB9_2
-; RV64-NEXT:  .LBB9_4: # %cond.store1
-; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
-; RV64-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-NEXT:    vmv.x.s a1, v8
-; RV64-NEXT:    sh a1, 4(a0)
-; RV64-NEXT:    srli a1, a1, 16
-; RV64-NEXT:    sh a1, 6(a0)
-; RV64-NEXT:    addi sp, sp, 16
-; RV64-NEXT:    ret
+; CHECK-LABEL: masked_store_v2i32_align2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vmseq.vi v0, v9, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vmv.v.i v10, 0
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vslideup.vi v10, v9, 0
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vmsne.vi v9, v10, 0
+; CHECK-NEXT:    addi a1, sp, 15
+; CHECK-NEXT:    vsm.v v9, (a1)
+; CHECK-NEXT:    lbu a1, 15(sp)
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB9_3
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    bnez a1, .LBB9_4
+; CHECK-NEXT:  .LBB9_2: # %else2
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB9_3: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 0, e32, mf2, ta, mu
+; CHECK-NEXT:    vmv.x.s a2, v8
+; CHECK-NEXT:    sh a2, 0(a0)
+; CHECK-NEXT:    srli a2, a2, 16
+; CHECK-NEXT:    sh a2, 2(a0)
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    beqz a1, .LBB9_2
+; CHECK-NEXT:  .LBB9_4: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    sh a1, 4(a0)
+; CHECK-NEXT:    srli a1, a1, 16
+; CHECK-NEXT:    sh a1, 6(a0)
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
   %mask = icmp eq <2 x i32> %m, zeroinitializer
   call void @llvm.masked.store.v2i32.p0v2i32(<2 x i32> %val, <2 x i32>* %a, i32 2, <2 x i1> %mask)
   ret void

diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index 9e27e120d437a..a4f861068e2f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare <vscale x 1 x i8> @llvm.masked.gather.nxv1i8.nxv1p0i8(<vscale x 1 x i8*>, i32, <vscale x 1 x i1>, <vscale x 1 x i8>)
 
@@ -979,15 +979,10 @@ define <vscale x 4 x i64> @mgather_truemask_nxv4i64(<vscale x 4 x i64*> %ptrs, <
 }
 
 define <vscale x 4 x i64> @mgather_falsemask_nxv4i64(<vscale x 4 x i64*> %ptrs, <vscale x 4 x i64> %passthru) {
-; RV32-LABEL: mgather_falsemask_nxv4i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vmv4r.v v8, v12
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mgather_falsemask_nxv4i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vmv4r.v v8, v12
-; RV64-NEXT:    ret
+; CHECK-LABEL: mgather_falsemask_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
   %v = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0i64(<vscale x 4 x i64*> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i64> %passthru)
   ret <vscale x 4 x i64> %v
 }
@@ -1915,15 +1910,10 @@ define <vscale x 4 x double> @mgather_truemask_nxv4f64(<vscale x 4 x double*> %p
 }
 
 define <vscale x 4 x double> @mgather_falsemask_nxv4f64(<vscale x 4 x double*> %ptrs, <vscale x 4 x double> %passthru) {
-; RV32-LABEL: mgather_falsemask_nxv4f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vmv4r.v v8, v12
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mgather_falsemask_nxv4f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vmv4r.v v8, v12
-; RV64-NEXT:    ret
+; CHECK-LABEL: mgather_falsemask_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
   %v = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0f64(<vscale x 4 x double*> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x double> %passthru)
   ret <vscale x 4 x double> %v
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
index 668765ce6e6d2..411094615c5f6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare void @llvm.masked.scatter.nxv1i8.nxv1p0i8(<vscale x 1 x i8>, <vscale x 1 x i8*>, i32, <vscale x 1 x i1>)
 
@@ -146,13 +146,9 @@ define void @mscatter_truemask_nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8*>
 }
 
 define void @mscatter_falsemask_nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8*> %ptrs) {
-; RV32-LABEL: mscatter_falsemask_nxv4i8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mscatter_falsemask_nxv4i8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    ret
+; CHECK-LABEL: mscatter_falsemask_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
   call void @llvm.masked.scatter.nxv4i8.nxv4p0i8(<vscale x 4 x i8> %val, <vscale x 4 x i8*> %ptrs, i32 1, <vscale x 4 x i1> zeroinitializer)
   ret void
 }
@@ -311,13 +307,9 @@ define void @mscatter_truemask_nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i1
 }
 
 define void @mscatter_falsemask_nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16*> %ptrs) {
-; RV32-LABEL: mscatter_falsemask_nxv4i16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mscatter_falsemask_nxv4i16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    ret
+; CHECK-LABEL: mscatter_falsemask_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
   call void @llvm.masked.scatter.nxv4i16.nxv4p0i16(<vscale x 4 x i16> %val, <vscale x 4 x i16*> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer)
   ret void
 }
@@ -526,13 +518,9 @@ define void @mscatter_truemask_nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i3
 }
 
 define void @mscatter_falsemask_nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32*> %ptrs) {
-; RV32-LABEL: mscatter_falsemask_nxv4i32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mscatter_falsemask_nxv4i32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    ret
+; CHECK-LABEL: mscatter_falsemask_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
   call void @llvm.masked.scatter.nxv4i32.nxv4p0i32(<vscale x 4 x i32> %val, <vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> zeroinitializer)
   ret void
 }
@@ -785,13 +773,9 @@ define void @mscatter_truemask_nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i6
 }
 
 define void @mscatter_falsemask_nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64*> %ptrs) {
-; RV32-LABEL: mscatter_falsemask_nxv4i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mscatter_falsemask_nxv4i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    ret
+; CHECK-LABEL: mscatter_falsemask_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
   call void @llvm.masked.scatter.nxv4i64.nxv4p0i64(<vscale x 4 x i64> %val, <vscale x 4 x i64*> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer)
   ret void
 }
@@ -1124,13 +1108,9 @@ define void @mscatter_truemask_nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x h
 }
 
 define void @mscatter_falsemask_nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half*> %ptrs) {
-; RV32-LABEL: mscatter_falsemask_nxv4f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mscatter_falsemask_nxv4f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    ret
+; CHECK-LABEL: mscatter_falsemask_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
   call void @llvm.masked.scatter.nxv4f16.nxv4p0f16(<vscale x 4 x half> %val, <vscale x 4 x half*> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer)
   ret void
 }
@@ -1320,13 +1300,9 @@ define void @mscatter_truemask_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x
 }
 
 define void @mscatter_falsemask_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs) {
-; RV32-LABEL: mscatter_falsemask_nxv4f32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mscatter_falsemask_nxv4f32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    ret
+; CHECK-LABEL: mscatter_falsemask_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
   call void @llvm.masked.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs, i32 4, <vscale x 4 x i1> zeroinitializer)
   ret void
 }
@@ -1579,13 +1555,9 @@ define void @mscatter_truemask_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x
 }
 
 define void @mscatter_falsemask_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs) {
-; RV32-LABEL: mscatter_falsemask_nxv4f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mscatter_falsemask_nxv4f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    ret
+; CHECK-LABEL: mscatter_falsemask_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
   call void @llvm.masked.scatter.nxv4f64.nxv4p0f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer)
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
index 9ea575fc8e1fa..c7844882c37c1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
-; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefix=RV32
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
-; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefix=RV64
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
@@ -12,17 +12,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
   iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV32-NEXT:    vfmacc.vv v8, v10, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV64-NEXT:    vfmacc.vv v8, v10, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vfmacc.vv v8, v10, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -41,17 +35,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
   iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV32-NEXT:    vfmadd.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV64-NEXT:    vfmadd.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vfmadd.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -70,17 +58,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
   iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV32-NEXT:    vfmsac.vv v8, v10, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV64-NEXT:    vfmsac.vv v8, v10, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vfmsac.vv v8, v10, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -99,17 +81,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
   iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV32-NEXT:    vfmsub.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV64-NEXT:    vfmsub.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vfmsub.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -128,17 +104,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
   iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV32-NEXT:    vfnmacc.vv v8, v10, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV64-NEXT:    vfnmacc.vv v8, v10, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vfnmacc.vv v8, v10, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -157,17 +127,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
   iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV32-NEXT:    vfnmadd.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV64-NEXT:    vfnmadd.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -186,17 +150,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
   iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV32-NEXT:    vfnmsac.vv v8, v10, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV64-NEXT:    vfnmsac.vv v8, v10, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vfnmsac.vv v8, v10, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -215,17 +173,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
   iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV32-NEXT:    vfnmsub.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; RV64-NEXT:    vfnmsub.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -244,17 +196,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
   iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV32-NEXT:    vfwmacc.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV64-NEXT:    vfwmacc.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -273,17 +219,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
   iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV32-NEXT:    vfwmsac.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV64-NEXT:    vfwmsac.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -302,17 +242,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
   iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV32-NEXT:    vfwnmacc.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV64-NEXT:    vfwnmacc.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -331,17 +265,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
   iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV32-NEXT:    vfwnmsac.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV64-NEXT:    vfwnmsac.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -500,17 +428,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i16>  @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV32-NEXT:    vwmacc.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV64-NEXT:    vwmacc.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vwmacc.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -529,17 +451,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV32-NEXT:    vwmaccsu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV64-NEXT:    vwmaccsu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -558,17 +474,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i16>  @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV32-NEXT:    vwmaccu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV64-NEXT:    vwmaccu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -587,17 +497,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
   iXLen);
 
 define <vscale x 1 x i16>  @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; RV32-NEXT:    vwmaccus.vx v8, a0, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; RV64-NEXT:    vwmaccus.vx v8, a0, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -615,17 +519,11 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
   iXLen);
 
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV32-NEXT:    vredsum.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV64-NEXT:    vredsum.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vredsum.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
     <vscale x 8 x i8> undef,
@@ -643,17 +541,11 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
   iXLen);
 
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV32-NEXT:    vredand.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV64-NEXT:    vredand.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vredand.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
     <vscale x 8 x i8> undef,
@@ -671,17 +563,11 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
   iXLen);
 
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV32-NEXT:    vredor.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV64-NEXT:    vredor.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vredor.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
     <vscale x 8 x i8> undef,
@@ -699,17 +585,11 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
   iXLen);
 
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV32-NEXT:    vredxor.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV64-NEXT:    vredxor.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vredxor.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
     <vscale x 8 x i8> undef,
@@ -727,17 +607,11 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
   iXLen);
 
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV32-NEXT:    vredminu.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV64-NEXT:    vredminu.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vredminu.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
     <vscale x 8 x i8> undef,
@@ -755,17 +629,11 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
   iXLen);
 
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV32-NEXT:    vredmin.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV64-NEXT:    vredmin.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vredmin.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
     <vscale x 8 x i8> undef,
@@ -783,17 +651,11 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
   iXLen);
 
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV32-NEXT:    vredmaxu.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV64-NEXT:    vredmaxu.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
     <vscale x 8 x i8> undef,
@@ -811,17 +673,11 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
   iXLen);
 
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV32-NEXT:    vredmax.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV64-NEXT:    vredmax.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vredmax.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
     <vscale x 8 x i8> undef,
@@ -839,17 +695,11 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
   iXLen);
 
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 1 x i8> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV32-NEXT:    vwredsumu.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV64-NEXT:    vwredsumu.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
     <vscale x 4 x i16> undef,
@@ -867,17 +717,11 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
   iXLen);
 
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 1 x i8> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV32-NEXT:    vwredsum.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV64-NEXT:    vwredsum.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vwredsum.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
     <vscale x 4 x i16> undef,
@@ -895,17 +739,11 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
   iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 1 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV32-NEXT:    vfredosum.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV64-NEXT:    vfredosum.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfredosum.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
     <vscale x 4 x half> undef,
@@ -923,17 +761,11 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
   iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 1 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV32-NEXT:    vfredusum.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV64-NEXT:    vfredusum.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfredusum.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
     <vscale x 4 x half> undef,
@@ -951,17 +783,11 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
   iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 1 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV32-NEXT:    vfredmax.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV64-NEXT:    vfredmax.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfredmax.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
     <vscale x 4 x half> undef,
@@ -979,17 +805,11 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
   iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 1 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV32-NEXT:    vfredmin.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV64-NEXT:    vfredmin.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfredmin.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
     <vscale x 4 x half> undef,
@@ -1007,17 +827,11 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
   iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 1 x half> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV32-NEXT:    vfwredosum.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV64-NEXT:    vfwredosum.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfwredosum.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
     <vscale x 2 x float> undef,
@@ -1034,17 +848,11 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
   iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 1 x half> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV32-NEXT:    vfwredusum.vs v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV64-NEXT:    vfwredusum.vs v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
     <vscale x 2 x float> undef,
@@ -1062,17 +870,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; RV32-NEXT:    vslidedown.vx v8, v8, a0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; RV64-NEXT:    vslidedown.vx v8, v8, a0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> undef,
@@ -1090,19 +892,12 @@ declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; RV32-NEXT:    vslideup.vx v9, v8, a0
-; RV32-NEXT:    vmv1r.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; RV64-NEXT:    vslideup.vx v9, v8, a0
-; RV64-NEXT:    vmv1r.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vslideup.vx v9, v8, a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> undef,
@@ -1140,17 +935,11 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, half, iXLen)
 
 define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(half %0, iXLen %1) nounwind {
-; RV32-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV32-NEXT:    vfmv.s.f v8, fa0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; RV64-NEXT:    vfmv.s.f v8, fa0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfmv.s.f v8, fa0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> undef, half %0, iXLen %1)
   ret <vscale x 1 x half> %a
@@ -1163,19 +952,12 @@ declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vcompress_um_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vcompress_um_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV32-NEXT:    vcompress.vm v9, v8, v0
-; RV32-NEXT:    vmv1r.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vcompress_um_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; RV64-NEXT:    vcompress.vm v9, v8, v0
-; RV64-NEXT:    vmv1r.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vcompress_um_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vcompress.vm v9, v8, v0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
     <vscale x 1 x i8> undef,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
index f5fe08a2aee9a..69fd3d4a69cf1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
-; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefix=RV32
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
-; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefix=RV64
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
   <vscale x 1 x i8>,
@@ -10,17 +10,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vle_v_tu_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vle_v_tu_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; RV32-NEXT:    vle8.v v8, (a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vle_v_tu_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; RV64-NEXT:    vle8.v v8, (a0)
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vle_v_tu_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -38,17 +32,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vlse(
 
 
 define <vscale x 1 x i8> @intrinsic_vlse_v_tu(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, iXLen %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vlse_v_tu:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a2, e8, mf8, tu, mu
-; RV32-NEXT:    vlse8.v v8, (a0), a1
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vlse_v_tu:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a2, e8, mf8, tu, mu
-; RV64-NEXT:    vlse8.v v8, (a0), a1
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vlse_v_tu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, tu, mu
+; CHECK-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse(
     <vscale x 1 x i8> %0,
@@ -98,17 +86,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vloxei_v_tu_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vloxei_v_tu_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; RV32-NEXT:    vloxei8.v v8, (a0), v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vloxei_v_tu_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; RV64-NEXT:    vloxei8.v v8, (a0), v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vloxei_v_tu_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT:    vloxei8.v v8, (a0), v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -126,17 +108,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vaadd.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vaadd.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vaadd.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -154,17 +130,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vaaddu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vaaddu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vaaddu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -182,17 +152,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vadd.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vadd.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -209,17 +173,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vand.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vand.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vand.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -237,17 +195,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vasub.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vasub.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vasub.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -265,17 +217,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vasubu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vasubu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vasubu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -293,17 +239,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vdiv.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vdiv.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vdiv.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -321,17 +261,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vdivu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vdivu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vdivu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -349,17 +283,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfadd.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfadd.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfadd.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -377,17 +305,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfdiv.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfdiv.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfdiv.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -405,17 +327,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfmax.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfmax.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfmax.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -433,17 +349,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfmin.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfmin.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfmin.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -461,17 +371,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfmul.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfmul.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfmul.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -489,17 +393,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfrdiv.vf v8, v9, fa0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfrdiv.vf v8, v9, fa0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -517,17 +415,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfsgnj.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfsgnj.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -545,17 +437,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfsgnjn.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfsgnjn.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -573,17 +459,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfsgnjx.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfsgnjx.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -601,17 +481,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfrsub.vf v8, v9, fa0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfrsub.vf v8, v9, fa0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfrsub.vf v8, v9, fa0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -629,17 +503,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfslide1down.vf v8, v9, fa0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfslide1down.vf v8, v9, fa0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -657,17 +525,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfslide1up.vf v8, v9, fa0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfslide1up.vf v8, v9, fa0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -685,17 +547,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfwsub.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfwsub.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwsub.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -713,17 +569,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
   iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfwsub.wv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfwsub.wv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwsub.wv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -741,19 +591,12 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
   iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vl4re16.v v24, (a0)
-; RV32-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; RV32-NEXT:    vfwsub.wv v8, v16, v24
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vl4re16.v v24, (a0)
-; RV64-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; RV64-NEXT:    vfwsub.wv v8, v16, v24
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl4re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT:    vfwsub.wv v8, v16, v24
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -771,17 +614,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfwmul.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfwmul.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwmul.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -799,17 +636,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
   iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfwadd.wv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfwadd.wv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwadd.wv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -827,17 +658,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfwadd.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfwadd.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwadd.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -855,17 +680,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfsub.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfsub.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfsub.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -948,17 +767,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vmax.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vmax.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vmax.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -976,17 +789,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vmaxu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vmaxu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vmaxu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1004,17 +811,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vmin.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vmin.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vmin.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1032,17 +833,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vminu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vminu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vminu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1060,17 +855,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vmul.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vmul.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vmul.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1088,17 +877,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vmulh.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vmulh.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vmulh.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1116,17 +899,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vmulhsu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vmulhsu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vmulhsu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1144,17 +921,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vmulhu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vmulhu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vmulhu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1172,17 +943,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vnclip.wv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vnclip.wv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vnclip.wv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1200,17 +965,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vnclipu.wv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vnclipu.wv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vnclipu.wv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1228,17 +987,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vnsra.wv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vnsra.wv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vnsra.wv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1256,17 +1009,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vnsrl.wv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vnsrl.wv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vnsrl.wv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1284,17 +1031,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vor.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vor.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vor.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1312,17 +1053,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vrem.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vrem.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vrem.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1345,17 +1080,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vrgather.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vrgather.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vrgather.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
     <vscale x 1 x i8> %0,
@@ -1373,17 +1102,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; RV32-NEXT:    vrgather.vx v8, v9, a0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; RV64-NEXT:    vrgather.vx v8, v9, a0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT:    vrgather.vx v8, v9, a0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1401,17 +1124,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vrgatherei16.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vrgatherei16.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1499,17 +1216,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vsaddu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vsaddu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vsaddu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1527,17 +1238,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vsll.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vsll.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vsll.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1555,17 +1260,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vsmul.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vsmul.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vsmul.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1618,17 +1317,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vsra.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vsra.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vsra.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1645,17 +1338,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vsrl.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vsrl.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vsrl.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1673,17 +1360,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vssra.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vssra.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vssra.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1701,17 +1382,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vssrl.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vssrl.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vssrl.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1729,17 +1404,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vssub.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vssub.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vssub.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1757,17 +1426,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vssubu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vssubu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vssubu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1855,17 +1518,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vsub.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vsub.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vsub.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1883,17 +1540,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vwadd.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vwadd.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vwadd.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -1911,17 +1562,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vwadd.wv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vwadd.wv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vwadd.wv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -1939,17 +1584,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vwaddu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vwaddu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vwaddu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -1967,17 +1606,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vwmul.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vwmul.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vwmul.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -1995,17 +1628,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vwmulu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vwmulu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vwmulu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -2023,17 +1650,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vwmulsu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vwmulsu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vwmulsu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -2051,17 +1672,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vwsub.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vwsub.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vwsub.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -2079,17 +1694,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vwsub.wv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vwsub.wv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vwsub.wv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -2101,17 +1710,11 @@ entry:
 }
 
 define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vwsub.wv v8, v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vwsub.wv v8, v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vwsub.wv v8, v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -2129,17 +1732,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vwsubu.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vwsubu.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vwsubu.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -2157,17 +1754,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vwsubu.wv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vwsubu.wv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vwsubu.wv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -2185,17 +1776,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vxor.vv v8, v9, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vxor.vv v8, v9, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vxor.vv v8, v9, v10
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -2212,17 +1797,11 @@ declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i64> @intrinsic_vsext_vf8_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vsext_vf8_nxv1i64:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; RV32-NEXT:    vsext.vf8 v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vsext_vf8_nxv1i64:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; RV64-NEXT:    vsext.vf8 v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vsext_vf8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vsext.vf8 v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -2238,17 +1817,11 @@ declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vzext_vf8_nxv1i64:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; RV32-NEXT:    vzext.vf8 v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vzext_vf8_nxv1i64:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; RV64-NEXT:    vzext.vf8 v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vzext_vf8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vzext.vf8 v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -2264,17 +1837,11 @@ declare <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
   iXLen);
 
 define <vscale x 2 x i16> @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( <vscale x 2 x i16> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; RV32-NEXT:    vfncvt.x.f.w v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; RV64-NEXT:    vfncvt.x.f.w v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfncvt.x.f.w v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -2289,17 +1856,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
-; RV32-LABEL: intrinsic_vid_v_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vid.v v8
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vid_v_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vid.v v8
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vid_v_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -2314,17 +1875,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
-; RV32-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfclass.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfclass.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfclass.v v8, v9
+; CHECK-NEXT:    ret
   <vscale x 1 x i16> %0,
   <vscale x 1 x half> %1,
   iXLen %2) nounwind {
@@ -2343,17 +1898,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfcvt.f.x.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfcvt.f.x.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -2369,17 +1918,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfcvt.f.xu.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfcvt.f.xu.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfcvt.f.xu.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -2395,17 +1938,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfcvt.rtz.x.f.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfcvt.rtz.x.f.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -2421,17 +1958,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfcvt.rtz.xu.f.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfcvt.rtz.xu.f.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -2447,17 +1978,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfcvt.x.f.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfcvt.x.f.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfcvt.x.f.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -2473,17 +1998,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfncvt.f.f.w v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfncvt.f.f.w v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfncvt.f.f.w v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
     <vscale x 1 x half> %0,
@@ -2499,17 +2018,11 @@ declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfcvt.xu.f.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfcvt.xu.f.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfcvt.xu.f.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -2525,17 +2038,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfncvt.f.x.w v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfncvt.f.x.w v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfncvt.f.x.w v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2551,17 +2058,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfncvt.f.xu.w v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfncvt.f.xu.w v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfncvt.f.xu.w v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2577,17 +2078,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfncvt.rod.f.f.w v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfncvt.rod.f.f.w v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
     <vscale x 1 x half> %0,
@@ -2603,17 +2098,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vfncvt.rtz.x.f.w v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vfncvt.rtz.x.f.w v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -2629,17 +2118,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vfncvt.rtz.xu.f.w v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vfncvt.rtz.xu.f.w v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -2655,17 +2138,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vfncvt.x.f.w v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vfncvt.x.f.w v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vfncvt.x.f.w v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -2681,17 +2158,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vfncvt.xu.f.w v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vfncvt.xu.f.w v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vfncvt.xu.f.w v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -2707,17 +2178,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfrec7.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfrec7.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfrec7.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
     <vscale x 1 x half> %0,
@@ -2733,17 +2198,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfrsqrt7.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfrsqrt7.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfrsqrt7.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
     <vscale x 1 x half> %0,
@@ -2759,17 +2218,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfsqrt.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfsqrt.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfsqrt.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
     <vscale x 1 x half> %0,
@@ -2785,17 +2238,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
   iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfwcvt.f.f.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfwcvt.f.f.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -2811,17 +2258,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vfwcvt.f.x.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vfwcvt.f.x.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -2837,17 +2278,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vfwcvt.f.xu.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vfwcvt.f.xu.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -2863,17 +2298,11 @@ declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
   iXLen);
 
 define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfwcvt.rtz.x.f.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfwcvt.rtz.x.f.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -2889,17 +2318,11 @@ declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
   iXLen);
 
 define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfwcvt.rtz.xu.f.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfwcvt.rtz.xu.f.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -2915,17 +2338,11 @@ declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
   iXLen);
 
 define <vscale x 1 x i32> @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfwcvt.x.f.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfwcvt.x.f.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwcvt.x.f.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -2941,17 +2358,11 @@ declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
   iXLen);
 
 define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vfwcvt.xu.f.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vfwcvt.xu.f.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -2967,17 +2378,11 @@ declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    viota.m v8, v0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    viota.m v8, v0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    viota.m v8, v0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -2995,17 +2400,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
-; RV32-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vadc.vvm v8, v9, v10, v0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vadc.vvm v8, v9, v10, v0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vadc.vvm v8, v9, v10, v0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -3025,17 +2424,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
-; RV32-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vsbc.vvm v8, v9, v10, v0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vsbc.vvm v8, v9, v10, v0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vsbc.vvm v8, v9, v10, v0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -3055,17 +2448,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
-; RV32-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vmerge.vvm v8, v9, v10, v0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vmerge.vvm v8, v9, v10, v0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v10, v0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -3156,17 +2543,11 @@ declare <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
   iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
-; RV32-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; RV32-NEXT:    vfmerge.vfm v8, v16, fa0, v0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; RV64-NEXT:    vfmerge.vfm v8, v16, fa0, v0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
+; CHECK-NEXT:    vfmerge.vfm v8, v16, fa0, v0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -3186,17 +2567,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
-; RV32-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vmerge.vvm v8, v9, v10, v0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vmerge.vvm v8, v9, v10, v0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v10, v0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -3216,17 +2591,11 @@ declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
   iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
-; RV32-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV32-NEXT:    vmerge.vim v8, v9, 0, v0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; RV64-NEXT:    vmerge.vim v8, v9, 0, v0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vmerge.vim v8, v9, 0, v0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -3244,17 +2613,11 @@ declare <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV32-NEXT:    vmv.v.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; RV64-NEXT:    vmv.v.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -3270,17 +2633,11 @@ declare <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
   iXLen);
 
 define <vscale x 1 x float> @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; RV32-NEXT:    vmv.v.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; RV64-NEXT:    vmv.v.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
     <vscale x 1 x float> %0,
@@ -3327,17 +2684,11 @@ declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
   iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
-; RV32-LABEL: intrinsic_vfmv.v.f_f_nxv1f32:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; RV32-NEXT:    vfmv.v.f v8, fa0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: intrinsic_vfmv.v.f_f_nxv1f32:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; RV64-NEXT:    vfmv.v.f v8, fa0
-; RV64-NEXT:    ret
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     <vscale x 1 x float> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll
index b8d09bcd9b839..83a4554e69aca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll
@@ -1,260 +1,165 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 define <vscale x 1 x float> @vfpext_nxv1f16_nxv1f32(<vscale x 1 x half> %va) {
 ;
-; RV32-LABEL: vfpext_nxv1f16_nxv1f32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v9, v8
-; RV32-NEXT:    vmv1r.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfpext_nxv1f16_nxv1f32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v9, v8
-; RV64-NEXT:    vmv1r.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfpext_nxv1f16_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
   %evec = fpext <vscale x 1 x half> %va to <vscale x 1 x float>
   ret <vscale x 1 x float> %evec
 }
 
 define <vscale x 1 x double> @vfpext_nxv1f16_nxv1f64(<vscale x 1 x half> %va) {
 ;
-; RV32-LABEL: vfpext_nxv1f16_nxv1f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v9, v8
-; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfpext_nxv1f16_nxv1f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v9, v8
-; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfpext_nxv1f16_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
+; CHECK-NEXT:    ret
   %evec = fpext <vscale x 1 x half> %va to <vscale x 1 x double>
   ret <vscale x 1 x double> %evec
 }
 
 define <vscale x 2 x float> @vfpext_nxv2f16_nxv2f32(<vscale x 2 x half> %va) {
 ;
-; RV32-LABEL: vfpext_nxv2f16_nxv2f32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v9, v8
-; RV32-NEXT:    vmv1r.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfpext_nxv2f16_nxv2f32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v9, v8
-; RV64-NEXT:    vmv1r.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfpext_nxv2f16_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
   %evec = fpext <vscale x 2 x half> %va to <vscale x 2 x float>
   ret <vscale x 2 x float> %evec
 }
 
 define <vscale x 2 x double> @vfpext_nxv2f16_nxv2f64(<vscale x 2 x half> %va) {
 ;
-; RV32-LABEL: vfpext_nxv2f16_nxv2f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v10, v8
-; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v8, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfpext_nxv2f16_nxv2f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v10, v8
-; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v8, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfpext_nxv2f16_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v8, v10
+; CHECK-NEXT:    ret
   %evec = fpext <vscale x 2 x half> %va to <vscale x 2 x double>
   ret <vscale x 2 x double> %evec
 }
 
 define <vscale x 4 x float> @vfpext_nxv4f16_nxv4f32(<vscale x 4 x half> %va) {
 ;
-; RV32-LABEL: vfpext_nxv4f16_nxv4f32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v10, v8
-; RV32-NEXT:    vmv2r.v v8, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfpext_nxv4f16_nxv4f32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v10, v8
-; RV64-NEXT:    vmv2r.v v8, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfpext_nxv4f16_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
   %evec = fpext <vscale x 4 x half> %va to <vscale x 4 x float>
   ret <vscale x 4 x float> %evec
 }
 
 define <vscale x 4 x double> @vfpext_nxv4f16_nxv4f64(<vscale x 4 x half> %va) {
 ;
-; RV32-LABEL: vfpext_nxv4f16_nxv4f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v12, v8
-; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v8, v12
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfpext_nxv4f16_nxv4f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v12, v8
-; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v8, v12
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfpext_nxv4f16_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v12, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v8, v12
+; CHECK-NEXT:    ret
   %evec = fpext <vscale x 4 x half> %va to <vscale x 4 x double>
   ret <vscale x 4 x double> %evec
 }
 
 define <vscale x 8 x float> @vfpext_nxv8f16_nxv8f32(<vscale x 8 x half> %va) {
 ;
-; RV32-LABEL: vfpext_nxv8f16_nxv8f32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v12, v8
-; RV32-NEXT:    vmv4r.v v8, v12
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfpext_nxv8f16_nxv8f32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v12, v8
-; RV64-NEXT:    vmv4r.v v8, v12
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfpext_nxv8f16_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v12, v8
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
   %evec = fpext <vscale x 8 x half> %va to <vscale x 8 x float>
   ret <vscale x 8 x float> %evec
 }
 
 define <vscale x 8 x double> @vfpext_nxv8f16_nxv8f64(<vscale x 8 x half> %va) {
 ;
-; RV32-LABEL: vfpext_nxv8f16_nxv8f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v16, v8
-; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v8, v16
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfpext_nxv8f16_nxv8f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v16, v8
-; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v8, v16
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfpext_nxv8f16_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v8, v16
+; CHECK-NEXT:    ret
   %evec = fpext <vscale x 8 x half> %va to <vscale x 8 x double>
   ret <vscale x 8 x double> %evec
 }
 
 define <vscale x 16 x float> @vfpext_nxv16f16_nxv16f32(<vscale x 16 x half> %va) {
 ;
-; RV32-LABEL: vfpext_nxv16f16_nxv16f32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v16, v8
-; RV32-NEXT:    vmv8r.v v8, v16
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfpext_nxv16f16_nxv16f32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v16, v8
-; RV64-NEXT:    vmv8r.v v8, v16
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfpext_nxv16f16_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
   %evec = fpext <vscale x 16 x half> %va to <vscale x 16 x float>
   ret <vscale x 16 x float> %evec
 }
 
 define <vscale x 1 x double> @vfpext_nxv1f32_nxv1f64(<vscale x 1 x float> %va) {
 ;
-; RV32-LABEL: vfpext_nxv1f32_nxv1f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v9, v8
-; RV32-NEXT:    vmv1r.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfpext_nxv1f32_nxv1f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v9, v8
-; RV64-NEXT:    vmv1r.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfpext_nxv1f32_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
   %evec = fpext <vscale x 1 x float> %va to <vscale x 1 x double>
   ret <vscale x 1 x double> %evec
 }
 
 define <vscale x 2 x double> @vfpext_nxv2f32_nxv2f64(<vscale x 2 x float> %va) {
 ;
-; RV32-LABEL: vfpext_nxv2f32_nxv2f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v10, v8
-; RV32-NEXT:    vmv2r.v v8, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfpext_nxv2f32_nxv2f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v10, v8
-; RV64-NEXT:    vmv2r.v v8, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfpext_nxv2f32_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
   %evec = fpext <vscale x 2 x float> %va to <vscale x 2 x double>
   ret <vscale x 2 x double> %evec
 }
 
 define <vscale x 4 x double> @vfpext_nxv4f32_nxv4f64(<vscale x 4 x float> %va) {
 ;
-; RV32-LABEL: vfpext_nxv4f32_nxv4f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v12, v8
-; RV32-NEXT:    vmv4r.v v8, v12
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfpext_nxv4f32_nxv4f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v12, v8
-; RV64-NEXT:    vmv4r.v v8, v12
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfpext_nxv4f32_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v12, v8
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
   %evec = fpext <vscale x 4 x float> %va to <vscale x 4 x double>
   ret <vscale x 4 x double> %evec
 }
 
 define <vscale x 8 x double> @vfpext_nxv8f32_nxv8f64(<vscale x 8 x float> %va) {
 ;
-; RV32-LABEL: vfpext_nxv8f32_nxv8f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; RV32-NEXT:    vfwcvt.f.f.v v16, v8
-; RV32-NEXT:    vmv8r.v v8, v16
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfpext_nxv8f32_nxv8f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; RV64-NEXT:    vfwcvt.f.f.v v16, v8
-; RV64-NEXT:    vmv8r.v v8, v16
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfpext_nxv8f32_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
   %evec = fpext <vscale x 8 x float> %va to <vscale x 8 x double>
   ret <vscale x 8 x double> %evec
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll
index 308fe7bd7ab2a..800fa67ca08bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll
@@ -1,260 +1,165 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
 
 define <vscale x 1 x half> @vfptrunc_nxv1f32_nxv1f16(<vscale x 1 x float> %va) {
 ;
-; RV32-LABEL: vfptrunc_nxv1f32_nxv1f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; RV32-NEXT:    vfncvt.f.f.w v9, v8
-; RV32-NEXT:    vmv1r.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfptrunc_nxv1f32_nxv1f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; RV64-NEXT:    vfncvt.f.f.w v9, v8
-; RV64-NEXT:    vmv1r.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfptrunc_nxv1f32_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
   %evec = fptrunc <vscale x 1 x float> %va to <vscale x 1 x half>
   ret <vscale x 1 x half> %evec
 }
 
 define <vscale x 2 x half> @vfptrunc_nxv2f32_nxv2f16(<vscale x 2 x float> %va) {
 ;
-; RV32-LABEL: vfptrunc_nxv2f32_nxv2f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vfncvt.f.f.w v9, v8
-; RV32-NEXT:    vmv1r.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfptrunc_nxv2f32_nxv2f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vfncvt.f.f.w v9, v8
-; RV64-NEXT:    vmv1r.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfptrunc_nxv2f32_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
   %evec = fptrunc <vscale x 2 x float> %va to <vscale x 2 x half>
   ret <vscale x 2 x half> %evec
 }
 
 define <vscale x 4 x half> @vfptrunc_nxv4f32_nxv4f16(<vscale x 4 x float> %va) {
 ;
-; RV32-LABEL: vfptrunc_nxv4f32_nxv4f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; RV32-NEXT:    vfncvt.f.f.w v10, v8
-; RV32-NEXT:    vmv.v.v v8, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfptrunc_nxv4f32_nxv4f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; RV64-NEXT:    vfncvt.f.f.w v10, v8
-; RV64-NEXT:    vmv.v.v v8, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfptrunc_nxv4f32_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
   %evec = fptrunc <vscale x 4 x float> %va to <vscale x 4 x half>
   ret <vscale x 4 x half> %evec
 }
 
 define <vscale x 8 x half> @vfptrunc_nxv8f32_nxv8f16(<vscale x 8 x float> %va) {
 ;
-; RV32-LABEL: vfptrunc_nxv8f32_nxv8f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; RV32-NEXT:    vfncvt.f.f.w v12, v8
-; RV32-NEXT:    vmv.v.v v8, v12
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfptrunc_nxv8f32_nxv8f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; RV64-NEXT:    vfncvt.f.f.w v12, v8
-; RV64-NEXT:    vmv.v.v v8, v12
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfptrunc_nxv8f32_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
   %evec = fptrunc <vscale x 8 x float> %va to <vscale x 8 x half>
   ret <vscale x 8 x half> %evec
 }
 
 define <vscale x 16 x half> @vfptrunc_nxv16f32_nxv16f16(<vscale x 16 x float> %va) {
 ;
-; RV32-LABEL: vfptrunc_nxv16f32_nxv16f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
-; RV32-NEXT:    vfncvt.f.f.w v16, v8
-; RV32-NEXT:    vmv.v.v v8, v16
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfptrunc_nxv16f32_nxv16f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
-; RV64-NEXT:    vfncvt.f.f.w v16, v8
-; RV64-NEXT:    vmv.v.v v8, v16
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfptrunc_nxv16f32_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
   %evec = fptrunc <vscale x 16 x float> %va to <vscale x 16 x half>
   ret <vscale x 16 x half> %evec
 }
 
 define <vscale x 1 x half> @vfptrunc_nxv1f64_nxv1f16(<vscale x 1 x double> %va) {
 ;
-; RV32-LABEL: vfptrunc_nxv1f64_nxv1f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; RV32-NEXT:    vfncvt.rod.f.f.w v9, v8
-; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT:    vfncvt.f.f.w v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfptrunc_nxv1f64_nxv1f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; RV64-NEXT:    vfncvt.rod.f.f.w v9, v8
-; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT:    vfncvt.f.f.w v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v8, v9
+; CHECK-NEXT:    ret
   %evec = fptrunc <vscale x 1 x double> %va to <vscale x 1 x half>
   ret <vscale x 1 x half> %evec
 }
 
 define <vscale x 1 x float> @vfptrunc_nxv1f64_nxv1f32(<vscale x 1 x double> %va) {
 ;
-; RV32-LABEL: vfptrunc_nxv1f64_nxv1f32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; RV32-NEXT:    vfncvt.f.f.w v9, v8
-; RV32-NEXT:    vmv1r.v v8, v9
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfptrunc_nxv1f64_nxv1f32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; RV64-NEXT:    vfncvt.f.f.w v9, v8
-; RV64-NEXT:    vmv1r.v v8, v9
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
   %evec = fptrunc <vscale x 1 x double> %va to <vscale x 1 x float>
   ret <vscale x 1 x float> %evec
 }
 
 define <vscale x 2 x half> @vfptrunc_nxv2f64_nxv2f16(<vscale x 2 x double> %va) {
 ;
-; RV32-LABEL: vfptrunc_nxv2f64_nxv2f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT:    vfncvt.rod.f.f.w v10, v8
-; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vfncvt.f.f.w v8, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfptrunc_nxv2f64_nxv2f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT:    vfncvt.rod.f.f.w v10, v8
-; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vfncvt.f.f.w v8, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v8, v10
+; CHECK-NEXT:    ret
   %evec = fptrunc <vscale x 2 x double> %va to <vscale x 2 x half>
   ret <vscale x 2 x half> %evec
 }
 
 define <vscale x 2 x float> @vfptrunc_nxv2f64_nxv2f32(<vscale x 2 x double> %va) {
 ;
-; RV32-LABEL: vfptrunc_nxv2f64_nxv2f32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT:    vfncvt.f.f.w v10, v8
-; RV32-NEXT:    vmv.v.v v8, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfptrunc_nxv2f64_nxv2f32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT:    vfncvt.f.f.w v10, v8
-; RV64-NEXT:    vmv.v.v v8, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
   %evec = fptrunc <vscale x 2 x double> %va to <vscale x 2 x float>
   ret <vscale x 2 x float> %evec
 }
 
 define <vscale x 4 x half> @vfptrunc_nxv4f64_nxv4f16(<vscale x 4 x double> %va) {
 ;
-; RV32-LABEL: vfptrunc_nxv4f64_nxv4f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; RV32-NEXT:    vfncvt.rod.f.f.w v12, v8
-; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32-NEXT:    vfncvt.f.f.w v8, v12
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfptrunc_nxv4f64_nxv4f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; RV64-NEXT:    vfncvt.rod.f.f.w v12, v8
-; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64-NEXT:    vfncvt.f.f.w v8, v12
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vfncvt.rod.f.f.w v12, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v8, v12
+; CHECK-NEXT:    ret
   %evec = fptrunc <vscale x 4 x double> %va to <vscale x 4 x half>
   ret <vscale x 4 x half> %evec
 }
 
 define <vscale x 4 x float> @vfptrunc_nxv4f64_nxv4f32(<vscale x 4 x double> %va) {
 ;
-; RV32-LABEL: vfptrunc_nxv4f64_nxv4f32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; RV32-NEXT:    vfncvt.f.f.w v12, v8
-; RV32-NEXT:    vmv.v.v v8, v12
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfptrunc_nxv4f64_nxv4f32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; RV64-NEXT:    vfncvt.f.f.w v12, v8
-; RV64-NEXT:    vmv.v.v v8, v12
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
   %evec = fptrunc <vscale x 4 x double> %va to <vscale x 4 x float>
   ret <vscale x 4 x float> %evec
 }
 
 define <vscale x 8 x half> @vfptrunc_nxv8f64_nxv8f16(<vscale x 8 x double> %va) {
 ;
-; RV32-LABEL: vfptrunc_nxv8f64_nxv8f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; RV32-NEXT:    vfncvt.rod.f.f.w v16, v8
-; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32-NEXT:    vfncvt.f.f.w v8, v16
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfptrunc_nxv8f64_nxv8f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; RV64-NEXT:    vfncvt.rod.f.f.w v16, v8
-; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64-NEXT:    vfncvt.f.f.w v8, v16
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vfncvt.rod.f.f.w v16, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v8, v16
+; CHECK-NEXT:    ret
   %evec = fptrunc <vscale x 8 x double> %va to <vscale x 8 x half>
   ret <vscale x 8 x half> %evec
 }
 
 define <vscale x 8 x float> @vfptrunc_nxv8f64_nxv8f32(<vscale x 8 x double> %va) {
 ;
-; RV32-LABEL: vfptrunc_nxv8f64_nxv8f32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; RV32-NEXT:    vfncvt.f.f.w v16, v8
-; RV32-NEXT:    vmv.v.v v8, v16
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vfptrunc_nxv8f64_nxv8f32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; RV64-NEXT:    vfncvt.f.f.w v16, v8
-; RV64-NEXT:    vmv.v.v v8, v16
-; RV64-NEXT:    ret
+; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
   %evec = fptrunc <vscale x 8 x double> %va to <vscale x 8 x float>
   ret <vscale x 8 x float> %evec
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
index efe47b04c5fbf..6ab24626b6597 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
@@ -1,106 +1,70 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+f,+d,+zfh,+experimental-zvfh,+v -target-abi ilp32d -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefix=RV32V
+; RUN:   | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+f,+d,+zfh,+experimental-zvfh,+v -target-abi lp64d -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefix=RV64V
+; RUN:   | FileCheck %s
 
 define <vscale x 8 x half> @vsplat_nxv8f16(half %f) {
-; RV32V-LABEL: vsplat_nxv8f16:
-; RV32V:       # %bb.0:
-; RV32V-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; RV32V-NEXT:    vfmv.v.f v8, fa0
-; RV32V-NEXT:    ret
-;
-; RV64V-LABEL: vsplat_nxv8f16:
-; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; RV64V-NEXT:    vfmv.v.f v8, fa0
-; RV64V-NEXT:    ret
+; CHECK-LABEL: vsplat_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %f, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x half> %splat
 }
 
 define <vscale x 8 x half> @vsplat_zero_nxv8f16() {
-; RV32V-LABEL: vsplat_zero_nxv8f16:
-; RV32V:       # %bb.0:
-; RV32V-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; RV32V-NEXT:    vmv.v.i v8, 0
-; RV32V-NEXT:    ret
-;
-; RV64V-LABEL: vsplat_zero_nxv8f16:
-; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; RV64V-NEXT:    vmv.v.i v8, 0
-; RV64V-NEXT:    ret
+; CHECK-LABEL: vsplat_zero_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half zeroinitializer, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x half> %splat
 }
 
 define <vscale x 8 x float> @vsplat_nxv8f32(float %f) {
-; RV32V-LABEL: vsplat_nxv8f32:
-; RV32V:       # %bb.0:
-; RV32V-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; RV32V-NEXT:    vfmv.v.f v8, fa0
-; RV32V-NEXT:    ret
-;
-; RV64V-LABEL: vsplat_nxv8f32:
-; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; RV64V-NEXT:    vfmv.v.f v8, fa0
-; RV64V-NEXT:    ret
+; CHECK-LABEL: vsplat_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %f, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x float> %splat
 }
 
 define <vscale x 8 x float> @vsplat_zero_nxv8f32() {
-; RV32V-LABEL: vsplat_zero_nxv8f32:
-; RV32V:       # %bb.0:
-; RV32V-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; RV32V-NEXT:    vmv.v.i v8, 0
-; RV32V-NEXT:    ret
-;
-; RV64V-LABEL: vsplat_zero_nxv8f32:
-; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; RV64V-NEXT:    vmv.v.i v8, 0
-; RV64V-NEXT:    ret
+; CHECK-LABEL: vsplat_zero_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float zeroinitializer, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x float> %splat
 }
 
 define <vscale x 8 x double> @vsplat_nxv8f64(double %f) {
-; RV32V-LABEL: vsplat_nxv8f64:
-; RV32V:       # %bb.0:
-; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV32V-NEXT:    vfmv.v.f v8, fa0
-; RV32V-NEXT:    ret
-;
-; RV64V-LABEL: vsplat_nxv8f64:
-; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV64V-NEXT:    vfmv.v.f v8, fa0
-; RV64V-NEXT:    ret
+; CHECK-LABEL: vsplat_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %f, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x double> %splat
 }
 
 define <vscale x 8 x double> @vsplat_zero_nxv8f64() {
-; RV32V-LABEL: vsplat_zero_nxv8f64:
-; RV32V:       # %bb.0:
-; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV32V-NEXT:    vmv.v.i v8, 0
-; RV32V-NEXT:    ret
-;
-; RV64V-LABEL: vsplat_zero_nxv8f64:
-; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV64V-NEXT:    vmv.v.i v8, 0
-; RV64V-NEXT:    ret
+; CHECK-LABEL: vsplat_zero_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double zeroinitializer, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x double> %splat
@@ -108,17 +72,11 @@ define <vscale x 8 x double> @vsplat_zero_nxv8f64() {
 
 ; Test that we fold this to a vlse with 0 stride.
 define <vscale x 8 x float> @vsplat_load_nxv8f32(float* %ptr) {
-; RV32V-LABEL: vsplat_load_nxv8f32:
-; RV32V:       # %bb.0:
-; RV32V-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
-; RV32V-NEXT:    vlse32.v v8, (a0), zero
-; RV32V-NEXT:    ret
-;
-; RV64V-LABEL: vsplat_load_nxv8f32:
-; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
-; RV64V-NEXT:    vlse32.v v8, (a0), zero
-; RV64V-NEXT:    ret
+; CHECK-LABEL: vsplat_load_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vlse32.v v8, (a0), zero
+; CHECK-NEXT:    ret
   %f = load float, float* %ptr
   %head = insertelement <vscale x 8 x float> poison, float %f, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
index b872481a0012d..1df87b084bf6b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
@@ -1,57 +1,38 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefix=RV32V
+; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32V
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefix=RV64V
+; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64V
 
 define <vscale x 8 x i64> @vsplat_nxv8i64_1() {
-; RV32V-LABEL: vsplat_nxv8i64_1:
-; RV32V:       # %bb.0:
-; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV32V-NEXT:    vmv.v.i v8, -1
-; RV32V-NEXT:    ret
-;
-; RV64V-LABEL: vsplat_nxv8i64_1:
-; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV64V-NEXT:    vmv.v.i v8, -1
-; RV64V-NEXT:    ret
+; CHECK-LABEL: vsplat_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, -1
+; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x i64> %splat
 }
 
 define <vscale x 8 x i64> @vsplat_nxv8i64_2() {
-; RV32V-LABEL: vsplat_nxv8i64_2:
-; RV32V:       # %bb.0:
-; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV32V-NEXT:    vmv.v.i v8, 4
-; RV32V-NEXT:    ret
-;
-; RV64V-LABEL: vsplat_nxv8i64_2:
-; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV64V-NEXT:    vmv.v.i v8, 4
-; RV64V-NEXT:    ret
+; CHECK-LABEL: vsplat_nxv8i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 4
+; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x i64> %splat
 }
 
 define <vscale x 8 x i64> @vsplat_nxv8i64_3() {
-; RV32V-LABEL: vsplat_nxv8i64_3:
-; RV32V:       # %bb.0:
-; RV32V-NEXT:    li a0, 255
-; RV32V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
-; RV32V-NEXT:    vmv.v.x v8, a0
-; RV32V-NEXT:    ret
-;
-; RV64V-LABEL: vsplat_nxv8i64_3:
-; RV64V:       # %bb.0:
-; RV64V-NEXT:    li a0, 255
-; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
-; RV64V-NEXT:    vmv.v.x v8, a0
-; RV64V-NEXT:    ret
+; CHECK-LABEL: vsplat_nxv8i64_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 255
+; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> poison, i64 255, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x i64> %splat
@@ -109,17 +90,11 @@ define <vscale x 8 x i64> @vsplat_nxv8i64_5(i64 %a) {
 }
 
 define <vscale x 8 x i64> @vadd_vx_nxv8i64_6(<vscale x 8 x i64> %v) {
-; RV32V-LABEL: vadd_vx_nxv8i64_6:
-; RV32V:       # %bb.0:
-; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV32V-NEXT:    vadd.vi v8, v8, 2
-; RV32V-NEXT:    ret
-;
-; RV64V-LABEL: vadd_vx_nxv8i64_6:
-; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV64V-NEXT:    vadd.vi v8, v8, 2
-; RV64V-NEXT:    ret
+; CHECK-LABEL: vadd_vx_nxv8i64_6:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vadd.vi v8, v8, 2
+; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> poison, i64 2, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
   %vret = add <vscale x 8 x i64> %v, %splat
@@ -127,17 +102,11 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_6(<vscale x 8 x i64> %v) {
 }
 
 define <vscale x 8 x i64> @vadd_vx_nxv8i64_7(<vscale x 8 x i64> %v) {
-; RV32V-LABEL: vadd_vx_nxv8i64_7:
-; RV32V:       # %bb.0:
-; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV32V-NEXT:    vadd.vi v8, v8, -1
-; RV32V-NEXT:    ret
-;
-; RV64V-LABEL: vadd_vx_nxv8i64_7:
-; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV64V-NEXT:    vadd.vi v8, v8, -1
-; RV64V-NEXT:    ret
+; CHECK-LABEL: vadd_vx_nxv8i64_7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vadd.vi v8, v8, -1
+; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
   %vret = add <vscale x 8 x i64> %v, %splat
@@ -145,19 +114,12 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_7(<vscale x 8 x i64> %v) {
 }
 
 define <vscale x 8 x i64> @vadd_vx_nxv8i64_8(<vscale x 8 x i64> %v) {
-; RV32V-LABEL: vadd_vx_nxv8i64_8:
-; RV32V:       # %bb.0:
-; RV32V-NEXT:    li a0, 255
-; RV32V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
-; RV32V-NEXT:    vadd.vx v8, v8, a0
-; RV32V-NEXT:    ret
-;
-; RV64V-LABEL: vadd_vx_nxv8i64_8:
-; RV64V:       # %bb.0:
-; RV64V-NEXT:    li a0, 255
-; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
-; RV64V-NEXT:    vadd.vx v8, v8, a0
-; RV64V-NEXT:    ret
+; CHECK-LABEL: vadd_vx_nxv8i64_8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 255
+; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vadd.vx v8, v8, a0
+; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> poison, i64 255, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
   %vret = add <vscale x 8 x i64> %v, %splat


        

