[llvm] ac269d1 - [RISCV][test] Update CHECK lines in condops related tests in preparation for Zicond codegen

Alex Bradbury via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 29 06:14:43 PDT 2023


Author: Alex Bradbury
Date: 2023-03-29T14:13:37+01:00
New Revision: ac269d185c38ceadfff38c1573334b8d3415f56a

URL: https://github.com/llvm/llvm-project/commit/ac269d185c38ceadfff38c1573334b8d3415f56a
DIFF: https://github.com/llvm/llvm-project/commit/ac269d185c38ceadfff38c1573334b8d3415f56a.diff

LOG: [RISCV][test] Update CHECK lines in condops related tests in preparation for Zicond codegen

Prefixes like 'CONDOPS' referring to the xventanacondops extension will
become confusing once Zicond is added to the mix.
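
For context, a minimal sketch of how a test using the renamed prefixes could
later gain a Zicond configuration without any prefix clash. The third RUN line
is hypothetical: the -mattr spelling ('+experimental-zicond') and the
'RV64IMZICOND' prefix are assumptions for illustration only, not part of this
commit.

  ; NOTE: Assertions would be regenerated by utils/update_llc_test_checks.py
  ; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
  ; RUN:   | FileCheck --check-prefixes=CHECK,CHECK64,RV64IM %s
  ; RUN: llc -mtriple=riscv64 -mattr=+m,+xventanacondops -verify-machineinstrs < %s \
  ; RUN:   | FileCheck --check-prefixes=CHECK,CHECK64,RV64IMXVTCONDOPS %s
  ; Hypothetical future configuration (attribute and prefix names assumed):
  ; RUN: llc -mtriple=riscv64 -mattr=+m,+experimental-zicond -verify-machineinstrs < %s \
  ; RUN:   | FileCheck --check-prefixes=CHECK,CHECK64,RV64IMZICOND %s

  define i32 @select_example(i1 zeroext %cond, i32 %a, i32 %b) {
  entry:
    %res = select i1 %cond, i32 %a, i32 %b
    ret i32 %res
  }

Output common to every configuration stays under the shared CHECK/CHECK64
prefixes, while configuration-specific code generation gets the unambiguous
RV64IM, RV64IMXVTCONDOPS, and (eventually) Zicond-specific prefixes, which is
exactly the confusion the rename avoids.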

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/select-binop-identity.ll
    llvm/test/CodeGen/RISCV/select.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/select-binop-identity.ll b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
index 3d3cdd78cbf3..78b2cd977644 100644
--- a/llvm/test/CodeGen/RISCV/select-binop-identity.ll
+++ b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
@@ -6,7 +6,7 @@
 ; RUN: llc -mtriple=riscv64 -mcpu=sifive-u74 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=SFB64 %s
 ; RUN: llc -mtriple=riscv64 -mattr=+xventanacondops -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=CONDOPS64 %s
+; RUN:   | FileCheck -check-prefix=VTCONDOPS64 %s
 
 ; InstCombine canonicalizes (c ? x | y : x) to (x | (c ? y : 0)) similar for
 ; other binary operations using their identity value as the constant.
@@ -38,14 +38,14 @@ define signext i32 @and_select_all_ones_i32(i1 zeroext %c, i32 signext %x, i32 s
 ; SFB64-NEXT:    mv a0, a2
 ; SFB64-NEXT:    ret
 ;
-; CONDOPS64-LABEL: and_select_all_ones_i32:
-; CONDOPS64:       # %bb.0:
-; CONDOPS64-NEXT:    li a3, -1
-; CONDOPS64-NEXT:    vt.maskcn a3, a3, a0
-; CONDOPS64-NEXT:    vt.maskc a0, a1, a0
-; CONDOPS64-NEXT:    or a0, a0, a3
-; CONDOPS64-NEXT:    and a0, a0, a2
-; CONDOPS64-NEXT:    ret
+; VTCONDOPS64-LABEL: and_select_all_ones_i32:
+; VTCONDOPS64:       # %bb.0:
+; VTCONDOPS64-NEXT:    li a3, -1
+; VTCONDOPS64-NEXT:    vt.maskcn a3, a3, a0
+; VTCONDOPS64-NEXT:    vt.maskc a0, a1, a0
+; VTCONDOPS64-NEXT:    or a0, a0, a3
+; VTCONDOPS64-NEXT:    and a0, a0, a2
+; VTCONDOPS64-NEXT:    ret
   %a = select i1 %c, i32 %x, i32 -1
   %b = and i32 %a, %y
   ret i32 %b
@@ -77,14 +77,14 @@ define i64 @and_select_all_ones_i64(i1 zeroext %c, i64 %x, i64 %y) {
 ; SFB64-NEXT:    mv a0, a2
 ; SFB64-NEXT:    ret
 ;
-; CONDOPS64-LABEL: and_select_all_ones_i64:
-; CONDOPS64:       # %bb.0:
-; CONDOPS64-NEXT:    vt.maskcn a1, a1, a0
-; CONDOPS64-NEXT:    li a3, -1
-; CONDOPS64-NEXT:    vt.maskc a0, a3, a0
-; CONDOPS64-NEXT:    or a0, a0, a1
-; CONDOPS64-NEXT:    and a0, a2, a0
-; CONDOPS64-NEXT:    ret
+; VTCONDOPS64-LABEL: and_select_all_ones_i64:
+; VTCONDOPS64:       # %bb.0:
+; VTCONDOPS64-NEXT:    vt.maskcn a1, a1, a0
+; VTCONDOPS64-NEXT:    li a3, -1
+; VTCONDOPS64-NEXT:    vt.maskc a0, a3, a0
+; VTCONDOPS64-NEXT:    or a0, a0, a1
+; VTCONDOPS64-NEXT:    and a0, a2, a0
+; VTCONDOPS64-NEXT:    ret
   %a = select i1 %c, i64 -1, i64 %x
   %b = and i64 %y, %a
   ret i64 %b
@@ -114,11 +114,11 @@ define signext i32 @or_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32 s
 ; SFB64-NEXT:    mv a0, a2
 ; SFB64-NEXT:    ret
 ;
-; CONDOPS64-LABEL: or_select_all_zeros_i32:
-; CONDOPS64:       # %bb.0:
-; CONDOPS64-NEXT:    vt.maskc a0, a1, a0
-; CONDOPS64-NEXT:    or a0, a2, a0
-; CONDOPS64-NEXT:    ret
+; VTCONDOPS64-LABEL: or_select_all_zeros_i32:
+; VTCONDOPS64:       # %bb.0:
+; VTCONDOPS64-NEXT:    vt.maskc a0, a1, a0
+; VTCONDOPS64-NEXT:    or a0, a2, a0
+; VTCONDOPS64-NEXT:    ret
   %a = select i1 %c, i32 %x, i32 0
   %b = or i32 %y, %a
   ret i32 %b
@@ -150,11 +150,11 @@ define i64 @or_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
 ; SFB64-NEXT:    mv a0, a2
 ; SFB64-NEXT:    ret
 ;
-; CONDOPS64-LABEL: or_select_all_zeros_i64:
-; CONDOPS64:       # %bb.0:
-; CONDOPS64-NEXT:    vt.maskcn a0, a1, a0
-; CONDOPS64-NEXT:    or a0, a0, a2
-; CONDOPS64-NEXT:    ret
+; VTCONDOPS64-LABEL: or_select_all_zeros_i64:
+; VTCONDOPS64:       # %bb.0:
+; VTCONDOPS64-NEXT:    vt.maskcn a0, a1, a0
+; VTCONDOPS64-NEXT:    or a0, a0, a2
+; VTCONDOPS64-NEXT:    ret
   %a = select i1 %c, i64 0, i64 %x
   %b = or i64 %a, %y
   ret i64 %b
@@ -184,11 +184,11 @@ define signext i32 @xor_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32
 ; SFB64-NEXT:    mv a0, a2
 ; SFB64-NEXT:    ret
 ;
-; CONDOPS64-LABEL: xor_select_all_zeros_i32:
-; CONDOPS64:       # %bb.0:
-; CONDOPS64-NEXT:    vt.maskcn a0, a1, a0
-; CONDOPS64-NEXT:    xor a0, a2, a0
-; CONDOPS64-NEXT:    ret
+; VTCONDOPS64-LABEL: xor_select_all_zeros_i32:
+; VTCONDOPS64:       # %bb.0:
+; VTCONDOPS64-NEXT:    vt.maskcn a0, a1, a0
+; VTCONDOPS64-NEXT:    xor a0, a2, a0
+; VTCONDOPS64-NEXT:    ret
   %a = select i1 %c, i32 0, i32 %x
   %b = xor i32 %y, %a
   ret i32 %b
@@ -220,11 +220,11 @@ define i64 @xor_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
 ; SFB64-NEXT:    mv a0, a2
 ; SFB64-NEXT:    ret
 ;
-; CONDOPS64-LABEL: xor_select_all_zeros_i64:
-; CONDOPS64:       # %bb.0:
-; CONDOPS64-NEXT:    vt.maskc a0, a1, a0
-; CONDOPS64-NEXT:    xor a0, a0, a2
-; CONDOPS64-NEXT:    ret
+; VTCONDOPS64-LABEL: xor_select_all_zeros_i64:
+; VTCONDOPS64:       # %bb.0:
+; VTCONDOPS64-NEXT:    vt.maskc a0, a1, a0
+; VTCONDOPS64-NEXT:    xor a0, a0, a2
+; VTCONDOPS64-NEXT:    ret
   %a = select i1 %c, i64 %x, i64 0
   %b = xor i64 %a, %y
   ret i64 %b
@@ -254,11 +254,11 @@ define signext i32 @add_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32
 ; SFB64-NEXT:    mv a0, a2
 ; SFB64-NEXT:    ret
 ;
-; CONDOPS64-LABEL: add_select_all_zeros_i32:
-; CONDOPS64:       # %bb.0:
-; CONDOPS64-NEXT:    vt.maskcn a0, a1, a0
-; CONDOPS64-NEXT:    addw a0, a2, a0
-; CONDOPS64-NEXT:    ret
+; VTCONDOPS64-LABEL: add_select_all_zeros_i32:
+; VTCONDOPS64:       # %bb.0:
+; VTCONDOPS64-NEXT:    vt.maskcn a0, a1, a0
+; VTCONDOPS64-NEXT:    addw a0, a2, a0
+; VTCONDOPS64-NEXT:    ret
   %a = select i1 %c, i32 0, i32 %x
   %b = add i32 %y, %a
   ret i32 %b
@@ -292,11 +292,11 @@ define i64 @add_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
 ; SFB64-NEXT:    mv a0, a2
 ; SFB64-NEXT:    ret
 ;
-; CONDOPS64-LABEL: add_select_all_zeros_i64:
-; CONDOPS64:       # %bb.0:
-; CONDOPS64-NEXT:    vt.maskc a0, a1, a0
-; CONDOPS64-NEXT:    add a0, a0, a2
-; CONDOPS64-NEXT:    ret
+; VTCONDOPS64-LABEL: add_select_all_zeros_i64:
+; VTCONDOPS64:       # %bb.0:
+; VTCONDOPS64-NEXT:    vt.maskc a0, a1, a0
+; VTCONDOPS64-NEXT:    add a0, a0, a2
+; VTCONDOPS64-NEXT:    ret
   %a = select i1 %c, i64 %x, i64 0
   %b = add i64 %a, %y
   ret i64 %b
@@ -326,11 +326,11 @@ define signext i32 @sub_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32
 ; SFB64-NEXT:    mv a0, a2
 ; SFB64-NEXT:    ret
 ;
-; CONDOPS64-LABEL: sub_select_all_zeros_i32:
-; CONDOPS64:       # %bb.0:
-; CONDOPS64-NEXT:    vt.maskcn a0, a1, a0
-; CONDOPS64-NEXT:    subw a0, a2, a0
-; CONDOPS64-NEXT:    ret
+; VTCONDOPS64-LABEL: sub_select_all_zeros_i32:
+; VTCONDOPS64:       # %bb.0:
+; VTCONDOPS64-NEXT:    vt.maskcn a0, a1, a0
+; VTCONDOPS64-NEXT:    subw a0, a2, a0
+; VTCONDOPS64-NEXT:    ret
   %a = select i1 %c, i32 0, i32 %x
   %b = sub i32 %y, %a
   ret i32 %b
@@ -364,11 +364,11 @@ define i64 @sub_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
 ; SFB64-NEXT:    mv a0, a2
 ; SFB64-NEXT:    ret
 ;
-; CONDOPS64-LABEL: sub_select_all_zeros_i64:
-; CONDOPS64:       # %bb.0:
-; CONDOPS64-NEXT:    vt.maskc a0, a1, a0
-; CONDOPS64-NEXT:    sub a0, a2, a0
-; CONDOPS64-NEXT:    ret
+; VTCONDOPS64-LABEL: sub_select_all_zeros_i64:
+; VTCONDOPS64:       # %bb.0:
+; VTCONDOPS64-NEXT:    vt.maskc a0, a1, a0
+; VTCONDOPS64-NEXT:    sub a0, a2, a0
+; VTCONDOPS64-NEXT:    ret
   %a = select i1 %c, i64 %x, i64 0
   %b = sub i64 %y, %a
   ret i64 %b

diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
index acc441af049e..d1c83c60a92f 100644
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -1,24 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
-; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64,NOCONDOPS %s
-; RUN: llc -mtriple=riscv64 -mattr=+m,+xventanacondops -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64,CONDOPS %s
+; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECK32,RV32IM %s
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECK64,RV64IM %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+xventanacondops -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECK64,RV64IMXVTCONDOPS %s
 
 define i16 @select_xor_1(i16 %A, i8 %cond) {
-; RV32-LABEL: select_xor_1:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    slli a1, a1, 31
-; RV32-NEXT:    srai a1, a1, 31
-; RV32-NEXT:    andi a1, a1, 43
-; RV32-NEXT:    xor a0, a0, a1
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: select_xor_1:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    slli a1, a1, 63
-; RV64-NEXT:    srai a1, a1, 63
-; RV64-NEXT:    andi a1, a1, 43
-; RV64-NEXT:    xor a0, a0, a1
-; RV64-NEXT:    ret
+; CHECK32-LABEL: select_xor_1:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    slli a1, a1, 31
+; CHECK32-NEXT:    srai a1, a1, 31
+; CHECK32-NEXT:    andi a1, a1, 43
+; CHECK32-NEXT:    xor a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; CHECK64-LABEL: select_xor_1:
+; CHECK64:       # %bb.0: # %entry
+; CHECK64-NEXT:    slli a1, a1, 63
+; CHECK64-NEXT:    srai a1, a1, 63
+; CHECK64-NEXT:    andi a1, a1, 43
+; CHECK64-NEXT:    xor a0, a0, a1
+; CHECK64-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp eq i8 %and, 0
@@ -30,29 +30,29 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i16 @select_xor_1b(i16 %A, i8 %cond) {
-; RV32-LABEL: select_xor_1b:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    slli a1, a1, 31
-; RV32-NEXT:    srai a1, a1, 31
-; RV32-NEXT:    andi a1, a1, 43
-; RV32-NEXT:    xor a0, a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_xor_1b:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    slli a1, a1, 63
-; NOCONDOPS-NEXT:    srai a1, a1, 63
-; NOCONDOPS-NEXT:    andi a1, a1, 43
-; NOCONDOPS-NEXT:    xor a0, a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_xor_1b:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    andi a1, a1, 1
-; CONDOPS-NEXT:    li a2, 43
-; CONDOPS-NEXT:    vt.maskc a1, a2, a1
-; CONDOPS-NEXT:    xor a0, a0, a1
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_xor_1b:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    slli a1, a1, 31
+; CHECK32-NEXT:    srai a1, a1, 31
+; CHECK32-NEXT:    andi a1, a1, 43
+; CHECK32-NEXT:    xor a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_xor_1b:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    slli a1, a1, 63
+; RV64IM-NEXT:    srai a1, a1, 63
+; RV64IM-NEXT:    andi a1, a1, 43
+; RV64IM-NEXT:    xor a0, a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_xor_1b:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    andi a1, a1, 1
+; RV64IMXVTCONDOPS-NEXT:    li a2, 43
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a2, a1
+; RV64IMXVTCONDOPS-NEXT:    xor a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
@@ -62,21 +62,21 @@ entry:
 }
 
 define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) {
-; RV32-LABEL: select_xor_2:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    slli a2, a2, 31
-; RV32-NEXT:    srai a2, a2, 31
-; RV32-NEXT:    and a1, a2, a1
-; RV32-NEXT:    xor a0, a0, a1
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: select_xor_2:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    slli a2, a2, 63
-; RV64-NEXT:    srai a2, a2, 63
-; RV64-NEXT:    and a1, a2, a1
-; RV64-NEXT:    xor a0, a0, a1
-; RV64-NEXT:    ret
+; CHECK32-LABEL: select_xor_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    slli a2, a2, 31
+; CHECK32-NEXT:    srai a2, a2, 31
+; CHECK32-NEXT:    and a1, a2, a1
+; CHECK32-NEXT:    xor a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; CHECK64-LABEL: select_xor_2:
+; CHECK64:       # %bb.0: # %entry
+; CHECK64-NEXT:    slli a2, a2, 63
+; CHECK64-NEXT:    srai a2, a2, 63
+; CHECK64-NEXT:    and a1, a2, a1
+; CHECK64-NEXT:    xor a0, a0, a1
+; CHECK64-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp eq i8 %and, 0
@@ -88,28 +88,28 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) {
-; RV32-LABEL: select_xor_2b:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    slli a2, a2, 31
-; RV32-NEXT:    srai a2, a2, 31
-; RV32-NEXT:    and a1, a2, a1
-; RV32-NEXT:    xor a0, a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_xor_2b:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    slli a2, a2, 63
-; NOCONDOPS-NEXT:    srai a2, a2, 63
-; NOCONDOPS-NEXT:    and a1, a2, a1
-; NOCONDOPS-NEXT:    xor a0, a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_xor_2b:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    andi a2, a2, 1
-; CONDOPS-NEXT:    vt.maskc a1, a1, a2
-; CONDOPS-NEXT:    xor a0, a0, a1
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_xor_2b:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    slli a2, a2, 31
+; CHECK32-NEXT:    srai a2, a2, 31
+; CHECK32-NEXT:    and a1, a2, a1
+; CHECK32-NEXT:    xor a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_xor_2b:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    slli a2, a2, 63
+; RV64IM-NEXT:    srai a2, a2, 63
+; RV64IM-NEXT:    and a1, a2, a1
+; RV64IM-NEXT:    xor a0, a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_xor_2b:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    andi a2, a2, 1
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    xor a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
@@ -119,29 +119,29 @@ entry:
 }
 
 define i16 @select_xor_3(i16 %A, i8 %cond) {
-; RV32-LABEL: select_xor_3:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    andi a1, a1, 1
-; RV32-NEXT:    addi a1, a1, -1
-; RV32-NEXT:    andi a1, a1, 43
-; RV32-NEXT:    xor a0, a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_xor_3:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    andi a1, a1, 1
-; NOCONDOPS-NEXT:    addiw a1, a1, -1
-; NOCONDOPS-NEXT:    andi a1, a1, 43
-; NOCONDOPS-NEXT:    xor a0, a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_xor_3:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    andi a1, a1, 1
-; CONDOPS-NEXT:    li a2, 43
-; CONDOPS-NEXT:    vt.maskcn a1, a2, a1
-; CONDOPS-NEXT:    xor a0, a0, a1
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_xor_3:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    andi a1, a1, 1
+; CHECK32-NEXT:    addi a1, a1, -1
+; CHECK32-NEXT:    andi a1, a1, 43
+; CHECK32-NEXT:    xor a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_xor_3:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    andi a1, a1, 1
+; RV64IM-NEXT:    addiw a1, a1, -1
+; RV64IM-NEXT:    andi a1, a1, 43
+; RV64IM-NEXT:    xor a0, a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_xor_3:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    andi a1, a1, 1
+; RV64IMXVTCONDOPS-NEXT:    li a2, 43
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a2, a1
+; RV64IMXVTCONDOPS-NEXT:    xor a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp eq i8 %and, 0
@@ -153,29 +153,29 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i16 @select_xor_3b(i16 %A, i8 %cond) {
-; RV32-LABEL: select_xor_3b:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    andi a1, a1, 1
-; RV32-NEXT:    addi a1, a1, -1
-; RV32-NEXT:    andi a1, a1, 43
-; RV32-NEXT:    xor a0, a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_xor_3b:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    andi a1, a1, 1
-; NOCONDOPS-NEXT:    addiw a1, a1, -1
-; NOCONDOPS-NEXT:    andi a1, a1, 43
-; NOCONDOPS-NEXT:    xor a0, a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_xor_3b:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    andi a1, a1, 1
-; CONDOPS-NEXT:    li a2, 43
-; CONDOPS-NEXT:    vt.maskcn a1, a2, a1
-; CONDOPS-NEXT:    xor a0, a0, a1
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_xor_3b:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    andi a1, a1, 1
+; CHECK32-NEXT:    addi a1, a1, -1
+; CHECK32-NEXT:    andi a1, a1, 43
+; CHECK32-NEXT:    xor a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_xor_3b:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    andi a1, a1, 1
+; RV64IM-NEXT:    addiw a1, a1, -1
+; RV64IM-NEXT:    andi a1, a1, 43
+; RV64IM-NEXT:    xor a0, a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_xor_3b:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    andi a1, a1, 1
+; RV64IMXVTCONDOPS-NEXT:    li a2, 43
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a2, a1
+; RV64IMXVTCONDOPS-NEXT:    xor a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
@@ -185,28 +185,28 @@ entry:
 }
 
 define i32 @select_xor_4(i32 %A, i32 %B, i8 %cond) {
-; RV32-LABEL: select_xor_4:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    andi a2, a2, 1
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a1, a2, a1
-; RV32-NEXT:    xor a0, a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_xor_4:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    andi a2, a2, 1
-; NOCONDOPS-NEXT:    addi a2, a2, -1
-; NOCONDOPS-NEXT:    and a1, a2, a1
-; NOCONDOPS-NEXT:    xor a0, a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_xor_4:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    andi a2, a2, 1
-; CONDOPS-NEXT:    vt.maskcn a1, a1, a2
-; CONDOPS-NEXT:    xor a0, a0, a1
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_xor_4:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    andi a2, a2, 1
+; CHECK32-NEXT:    addi a2, a2, -1
+; CHECK32-NEXT:    and a1, a2, a1
+; CHECK32-NEXT:    xor a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_xor_4:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    andi a2, a2, 1
+; RV64IM-NEXT:    addi a2, a2, -1
+; RV64IM-NEXT:    and a1, a2, a1
+; RV64IM-NEXT:    xor a0, a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_xor_4:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    andi a2, a2, 1
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    xor a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp eq i8 %and, 0
@@ -218,28 +218,28 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i32 @select_xor_4b(i32 %A, i32 %B, i8 %cond) {
-; RV32-LABEL: select_xor_4b:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    andi a2, a2, 1
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a1, a2, a1
-; RV32-NEXT:    xor a0, a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_xor_4b:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    andi a2, a2, 1
-; NOCONDOPS-NEXT:    addi a2, a2, -1
-; NOCONDOPS-NEXT:    and a1, a2, a1
-; NOCONDOPS-NEXT:    xor a0, a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_xor_4b:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    andi a2, a2, 1
-; CONDOPS-NEXT:    vt.maskcn a1, a1, a2
-; CONDOPS-NEXT:    xor a0, a0, a1
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_xor_4b:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    andi a2, a2, 1
+; CHECK32-NEXT:    addi a2, a2, -1
+; CHECK32-NEXT:    and a1, a2, a1
+; CHECK32-NEXT:    xor a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_xor_4b:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    andi a2, a2, 1
+; RV64IM-NEXT:    addi a2, a2, -1
+; RV64IM-NEXT:    and a1, a2, a1
+; RV64IM-NEXT:    xor a0, a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_xor_4b:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    andi a2, a2, 1
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    xor a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
@@ -249,21 +249,21 @@ entry:
 }
 
 define i32 @select_or(i32 %A, i32 %B, i8 %cond) {
-; RV32-LABEL: select_or:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    slli a2, a2, 31
-; RV32-NEXT:    srai a2, a2, 31
-; RV32-NEXT:    and a1, a2, a1
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: select_or:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    slli a2, a2, 63
-; RV64-NEXT:    srai a2, a2, 63
-; RV64-NEXT:    and a1, a2, a1
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    ret
+; CHECK32-LABEL: select_or:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    slli a2, a2, 31
+; CHECK32-NEXT:    srai a2, a2, 31
+; CHECK32-NEXT:    and a1, a2, a1
+; CHECK32-NEXT:    or a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; CHECK64-LABEL: select_or:
+; CHECK64:       # %bb.0: # %entry
+; CHECK64-NEXT:    slli a2, a2, 63
+; CHECK64-NEXT:    srai a2, a2, 63
+; CHECK64-NEXT:    and a1, a2, a1
+; CHECK64-NEXT:    or a0, a0, a1
+; CHECK64-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp eq i8 %and, 0
@@ -275,28 +275,28 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) {
-; RV32-LABEL: select_or_b:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    slli a2, a2, 31
-; RV32-NEXT:    srai a2, a2, 31
-; RV32-NEXT:    and a1, a2, a1
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_or_b:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    slli a2, a2, 63
-; NOCONDOPS-NEXT:    srai a2, a2, 63
-; NOCONDOPS-NEXT:    and a1, a2, a1
-; NOCONDOPS-NEXT:    or a0, a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_or_b:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    andi a2, a2, 1
-; CONDOPS-NEXT:    vt.maskc a1, a1, a2
-; CONDOPS-NEXT:    or a0, a0, a1
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_or_b:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    slli a2, a2, 31
+; CHECK32-NEXT:    srai a2, a2, 31
+; CHECK32-NEXT:    and a1, a2, a1
+; CHECK32-NEXT:    or a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_or_b:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    slli a2, a2, 63
+; RV64IM-NEXT:    srai a2, a2, 63
+; RV64IM-NEXT:    and a1, a2, a1
+; RV64IM-NEXT:    or a0, a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_or_b:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    andi a2, a2, 1
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
@@ -306,21 +306,21 @@ entry:
 }
 
 define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) {
-; RV32-LABEL: select_or_1:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    slli a2, a2, 31
-; RV32-NEXT:    srai a2, a2, 31
-; RV32-NEXT:    and a1, a2, a1
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: select_or_1:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    slli a2, a2, 63
-; RV64-NEXT:    srai a2, a2, 63
-; RV64-NEXT:    and a1, a2, a1
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    ret
+; CHECK32-LABEL: select_or_1:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    slli a2, a2, 31
+; CHECK32-NEXT:    srai a2, a2, 31
+; CHECK32-NEXT:    and a1, a2, a1
+; CHECK32-NEXT:    or a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; CHECK64-LABEL: select_or_1:
+; CHECK64:       # %bb.0: # %entry
+; CHECK64-NEXT:    slli a2, a2, 63
+; CHECK64-NEXT:    srai a2, a2, 63
+; CHECK64-NEXT:    and a1, a2, a1
+; CHECK64-NEXT:    or a0, a0, a1
+; CHECK64-NEXT:    ret
 entry:
  %and = and i32 %cond, 1
  %cmp10 = icmp eq i32 %and, 0
@@ -332,28 +332,28 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) {
-; RV32-LABEL: select_or_1b:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    slli a2, a2, 31
-; RV32-NEXT:    srai a2, a2, 31
-; RV32-NEXT:    and a1, a2, a1
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_or_1b:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    slli a2, a2, 63
-; NOCONDOPS-NEXT:    srai a2, a2, 63
-; NOCONDOPS-NEXT:    and a1, a2, a1
-; NOCONDOPS-NEXT:    or a0, a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_or_1b:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    andi a2, a2, 1
-; CONDOPS-NEXT:    vt.maskc a1, a1, a2
-; CONDOPS-NEXT:    or a0, a0, a1
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_or_1b:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    slli a2, a2, 31
+; CHECK32-NEXT:    srai a2, a2, 31
+; CHECK32-NEXT:    and a1, a2, a1
+; CHECK32-NEXT:    or a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_or_1b:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    slli a2, a2, 63
+; RV64IM-NEXT:    srai a2, a2, 63
+; RV64IM-NEXT:    and a1, a2, a1
+; RV64IM-NEXT:    or a0, a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_or_1b:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    andi a2, a2, 1
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
  %and = and i32 %cond, 1
  %cmp10 = icmp ne i32 %and, 1
@@ -363,28 +363,28 @@ entry:
 }
 
 define i32 @select_or_2(i32 %A, i32 %B, i8 %cond) {
-; RV32-LABEL: select_or_2:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    andi a2, a2, 1
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a1, a2, a1
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_or_2:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    andi a2, a2, 1
-; NOCONDOPS-NEXT:    addi a2, a2, -1
-; NOCONDOPS-NEXT:    and a1, a2, a1
-; NOCONDOPS-NEXT:    or a0, a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_or_2:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    andi a2, a2, 1
-; CONDOPS-NEXT:    vt.maskcn a1, a1, a2
-; CONDOPS-NEXT:    or a0, a0, a1
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_or_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    andi a2, a2, 1
+; CHECK32-NEXT:    addi a2, a2, -1
+; CHECK32-NEXT:    and a1, a2, a1
+; CHECK32-NEXT:    or a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_or_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    andi a2, a2, 1
+; RV64IM-NEXT:    addi a2, a2, -1
+; RV64IM-NEXT:    and a1, a2, a1
+; RV64IM-NEXT:    or a0, a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_or_2:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    andi a2, a2, 1
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp eq i8 %and, 0
@@ -396,28 +396,28 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i32 @select_or_2b(i32 %A, i32 %B, i8 %cond) {
-; RV32-LABEL: select_or_2b:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    andi a2, a2, 1
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a1, a2, a1
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_or_2b:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    andi a2, a2, 1
-; NOCONDOPS-NEXT:    addi a2, a2, -1
-; NOCONDOPS-NEXT:    and a1, a2, a1
-; NOCONDOPS-NEXT:    or a0, a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_or_2b:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    andi a2, a2, 1
-; CONDOPS-NEXT:    vt.maskcn a1, a1, a2
-; CONDOPS-NEXT:    or a0, a0, a1
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_or_2b:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    andi a2, a2, 1
+; CHECK32-NEXT:    addi a2, a2, -1
+; CHECK32-NEXT:    and a1, a2, a1
+; CHECK32-NEXT:    or a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_or_2b:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    andi a2, a2, 1
+; RV64IM-NEXT:    addi a2, a2, -1
+; RV64IM-NEXT:    and a1, a2, a1
+; RV64IM-NEXT:    or a0, a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_or_2b:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    andi a2, a2, 1
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
@@ -427,28 +427,28 @@ entry:
 }
 
 define i32 @select_or_3(i32 %A, i32 %B, i32 %cond) {
-; RV32-LABEL: select_or_3:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    andi a2, a2, 1
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a1, a2, a1
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_or_3:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    andi a2, a2, 1
-; NOCONDOPS-NEXT:    addi a2, a2, -1
-; NOCONDOPS-NEXT:    and a1, a2, a1
-; NOCONDOPS-NEXT:    or a0, a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_or_3:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    andi a2, a2, 1
-; CONDOPS-NEXT:    vt.maskcn a1, a1, a2
-; CONDOPS-NEXT:    or a0, a0, a1
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_or_3:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    andi a2, a2, 1
+; CHECK32-NEXT:    addi a2, a2, -1
+; CHECK32-NEXT:    and a1, a2, a1
+; CHECK32-NEXT:    or a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_or_3:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    andi a2, a2, 1
+; RV64IM-NEXT:    addi a2, a2, -1
+; RV64IM-NEXT:    and a1, a2, a1
+; RV64IM-NEXT:    or a0, a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_or_3:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    andi a2, a2, 1
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
  %and = and i32 %cond, 1
  %cmp10 = icmp eq i32 %and, 0
@@ -460,28 +460,28 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i32 @select_or_3b(i32 %A, i32 %B, i32 %cond) {
-; RV32-LABEL: select_or_3b:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    andi a2, a2, 1
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a1, a2, a1
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_or_3b:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    andi a2, a2, 1
-; NOCONDOPS-NEXT:    addi a2, a2, -1
-; NOCONDOPS-NEXT:    and a1, a2, a1
-; NOCONDOPS-NEXT:    or a0, a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_or_3b:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    andi a2, a2, 1
-; CONDOPS-NEXT:    vt.maskcn a1, a1, a2
-; CONDOPS-NEXT:    or a0, a0, a1
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_or_3b:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    andi a2, a2, 1
+; CHECK32-NEXT:    addi a2, a2, -1
+; CHECK32-NEXT:    and a1, a2, a1
+; CHECK32-NEXT:    or a0, a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_or_3b:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    andi a2, a2, 1
+; RV64IM-NEXT:    addi a2, a2, -1
+; RV64IM-NEXT:    and a1, a2, a1
+; RV64IM-NEXT:    or a0, a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_or_3b:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    andi a2, a2, 1
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
  %and = and i32 %cond, 1
  %cmp10 = icmp ne i32 %and, 1
@@ -491,29 +491,29 @@ entry:
 }
 
 define i32 @select_add_1(i1 zeroext %cond, i32 %a, i32 %b) {
-; RV32-LABEL: select_add_1:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    neg a0, a0
-; RV32-NEXT:    and a0, a0, a1
-; RV32-NEXT:    add a0, a2, a0
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_add_1:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    beqz a0, .LBB16_2
-; NOCONDOPS-NEXT:  # %bb.1:
-; NOCONDOPS-NEXT:    addw a2, a1, a2
-; NOCONDOPS-NEXT:  .LBB16_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a2
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_add_1:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    addw a1, a1, a2
-; CONDOPS-NEXT:    vt.maskcn a2, a2, a0
-; CONDOPS-NEXT:    vt.maskc a0, a1, a0
-; CONDOPS-NEXT:    or a0, a0, a2
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_add_1:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    neg a0, a0
+; CHECK32-NEXT:    and a0, a0, a1
+; CHECK32-NEXT:    add a0, a2, a0
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_add_1:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB16_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    addw a2, a1, a2
+; RV64IM-NEXT:  .LBB16_2: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_add_1:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    addw a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a2, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a2
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = add i32 %a, %b
   %res = select i1 %cond, i32 %c, i32 %b
@@ -521,29 +521,29 @@ entry:
 }
 
 define i32 @select_add_2(i1 zeroext %cond, i32 %a, i32 %b) {
-; RV32-LABEL: select_add_2:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    addi a0, a0, -1
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    add a0, a1, a0
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_add_2:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    bnez a0, .LBB17_2
-; NOCONDOPS-NEXT:  # %bb.1: # %entry
-; NOCONDOPS-NEXT:    addw a1, a1, a2
-; NOCONDOPS-NEXT:  .LBB17_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_add_2:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    addw a2, a1, a2
-; CONDOPS-NEXT:    vt.maskc a1, a1, a0
-; CONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; CONDOPS-NEXT:    or a0, a1, a0
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_add_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    addi a0, a0, -1
+; CHECK32-NEXT:    and a0, a0, a2
+; CHECK32-NEXT:    add a0, a1, a0
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_add_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    bnez a0, .LBB17_2
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    addw a1, a1, a2
+; RV64IM-NEXT:  .LBB17_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_add_2:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    addw a2, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = add i32 %a, %b
   %res = select i1 %cond, i32 %a, i32 %c
@@ -551,29 +551,29 @@ entry:
 }
 
 define i32 @select_add_3(i1 zeroext %cond, i32 %a) {
-; RV32-LABEL: select_add_3:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    addi a0, a0, -1
-; RV32-NEXT:    andi a0, a0, 42
-; RV32-NEXT:    add a0, a1, a0
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_add_3:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    bnez a0, .LBB18_2
-; NOCONDOPS-NEXT:  # %bb.1: # %entry
-; NOCONDOPS-NEXT:    addiw a1, a1, 42
-; NOCONDOPS-NEXT:  .LBB18_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_add_3:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    addiw a2, a1, 42
-; CONDOPS-NEXT:    vt.maskc a1, a1, a0
-; CONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; CONDOPS-NEXT:    or a0, a1, a0
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_add_3:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    addi a0, a0, -1
+; CHECK32-NEXT:    andi a0, a0, 42
+; CHECK32-NEXT:    add a0, a1, a0
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_add_3:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    bnez a0, .LBB18_2
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    addiw a1, a1, 42
+; RV64IM-NEXT:  .LBB18_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_add_3:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    addiw a2, a1, 42
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = add i32 %a, 42
   %res = select i1 %cond, i32 %a, i32 %c
@@ -581,31 +581,31 @@ entry:
 }
 
 define i32 @select_sub_1(i1 zeroext %cond, i32 %a, i32 %b) {
-; RV32-LABEL: select_sub_1:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    beqz a0, .LBB19_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    sub a2, a1, a2
-; RV32-NEXT:  .LBB19_2: # %entry
-; RV32-NEXT:    mv a0, a2
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_sub_1:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    beqz a0, .LBB19_2
-; NOCONDOPS-NEXT:  # %bb.1:
-; NOCONDOPS-NEXT:    subw a2, a1, a2
-; NOCONDOPS-NEXT:  .LBB19_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a2
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_sub_1:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    subw a1, a1, a2
-; CONDOPS-NEXT:    vt.maskcn a2, a2, a0
-; CONDOPS-NEXT:    vt.maskc a0, a1, a0
-; CONDOPS-NEXT:    or a0, a0, a2
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_sub_1:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    beqz a0, .LBB19_2
+; CHECK32-NEXT:  # %bb.1:
+; CHECK32-NEXT:    sub a2, a1, a2
+; CHECK32-NEXT:  .LBB19_2: # %entry
+; CHECK32-NEXT:    mv a0, a2
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_sub_1:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB19_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    subw a2, a1, a2
+; RV64IM-NEXT:  .LBB19_2: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_sub_1:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    subw a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a2, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a2
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = sub i32 %a, %b
   %res = select i1 %cond, i32 %c, i32 %b
@@ -613,29 +613,29 @@ entry:
 }
 
 define i32 @select_sub_2(i1 zeroext %cond, i32 %a, i32 %b) {
-; RV32-LABEL: select_sub_2:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    addi a0, a0, -1
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_sub_2:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    bnez a0, .LBB20_2
-; NOCONDOPS-NEXT:  # %bb.1: # %entry
-; NOCONDOPS-NEXT:    subw a1, a1, a2
-; NOCONDOPS-NEXT:  .LBB20_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_sub_2:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    subw a2, a1, a2
-; CONDOPS-NEXT:    vt.maskc a1, a1, a0
-; CONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; CONDOPS-NEXT:    or a0, a1, a0
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_sub_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    addi a0, a0, -1
+; CHECK32-NEXT:    and a0, a0, a2
+; CHECK32-NEXT:    sub a0, a1, a0
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_sub_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    bnez a0, .LBB20_2
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    subw a1, a1, a2
+; RV64IM-NEXT:  .LBB20_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_sub_2:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    subw a2, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = sub i32 %a, %b
   %res = select i1 %cond, i32 %a, i32 %c
@@ -643,29 +643,29 @@ entry:
 }
 
 define i32 @select_sub_3(i1 zeroext %cond, i32 %a) {
-; RV32-LABEL: select_sub_3:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    addi a0, a0, -1
-; RV32-NEXT:    andi a0, a0, 42
-; RV32-NEXT:    sub a0, a1, a0
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_sub_3:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    bnez a0, .LBB21_2
-; NOCONDOPS-NEXT:  # %bb.1: # %entry
-; NOCONDOPS-NEXT:    addiw a1, a1, -42
-; NOCONDOPS-NEXT:  .LBB21_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_sub_3:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    addiw a2, a1, -42
-; CONDOPS-NEXT:    vt.maskc a1, a1, a0
-; CONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; CONDOPS-NEXT:    or a0, a1, a0
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_sub_3:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    addi a0, a0, -1
+; CHECK32-NEXT:    andi a0, a0, 42
+; CHECK32-NEXT:    sub a0, a1, a0
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_sub_3:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    bnez a0, .LBB21_2
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    addiw a1, a1, -42
+; RV64IM-NEXT:  .LBB21_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_sub_3:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    addiw a2, a1, -42
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = sub i32 %a, 42
   %res = select i1 %cond, i32 %a, i32 %c
@@ -673,30 +673,30 @@ entry:
 }
 
 define i32 @select_and_1(i1 zeroext %cond, i32 %a, i32 %b) {
-; RV32-LABEL: select_and_1:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    beqz a0, .LBB22_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    and a2, a1, a2
-; RV32-NEXT:  .LBB22_2: # %entry
-; RV32-NEXT:    mv a0, a2
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_and_1:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    beqz a0, .LBB22_2
-; NOCONDOPS-NEXT:  # %bb.1:
-; NOCONDOPS-NEXT:    and a2, a1, a2
-; NOCONDOPS-NEXT:  .LBB22_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a2
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_and_1:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; CONDOPS-NEXT:    and a1, a2, a1
-; CONDOPS-NEXT:    or a0, a1, a0
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_and_1:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    beqz a0, .LBB22_2
+; CHECK32-NEXT:  # %bb.1:
+; CHECK32-NEXT:    and a2, a1, a2
+; CHECK32-NEXT:  .LBB22_2: # %entry
+; CHECK32-NEXT:    mv a0, a2
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_and_1:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB22_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    and a2, a1, a2
+; RV64IM-NEXT:  .LBB22_2: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_and_1:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    and a1, a2, a1
+; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = and i32 %a, %b
   %res = select i1 %cond, i32 %c, i32 %b
@@ -704,30 +704,30 @@ entry:
 }
 
 define i32 @select_and_2(i1 zeroext %cond, i32 %a, i32 %b) {
-; RV32-LABEL: select_and_2:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    bnez a0, .LBB23_2
-; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:  .LBB23_2: # %entry
-; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_and_2:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    bnez a0, .LBB23_2
-; NOCONDOPS-NEXT:  # %bb.1: # %entry
-; NOCONDOPS-NEXT:    and a1, a1, a2
-; NOCONDOPS-NEXT:  .LBB23_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_and_2:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    vt.maskc a0, a1, a0
-; CONDOPS-NEXT:    and a1, a1, a2
-; CONDOPS-NEXT:    or a0, a1, a0
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_and_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    bnez a0, .LBB23_2
+; CHECK32-NEXT:  # %bb.1: # %entry
+; CHECK32-NEXT:    and a1, a1, a2
+; CHECK32-NEXT:  .LBB23_2: # %entry
+; CHECK32-NEXT:    mv a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_and_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    bnez a0, .LBB23_2
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    and a1, a1, a2
+; RV64IM-NEXT:  .LBB23_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_and_2:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    and a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = and i32 %a, %b
   %res = select i1 %cond, i32 %a, i32 %c
@@ -735,31 +735,31 @@ entry:
 }
 
 define i32 @select_and_3(i1 zeroext %cond, i32 %a) {
-; RV32-LABEL: select_and_3:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    bnez a0, .LBB24_2
-; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    andi a1, a1, 42
-; RV32-NEXT:  .LBB24_2: # %entry
-; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_and_3:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    bnez a0, .LBB24_2
-; NOCONDOPS-NEXT:  # %bb.1: # %entry
-; NOCONDOPS-NEXT:    andi a1, a1, 42
-; NOCONDOPS-NEXT:  .LBB24_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_and_3:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    vt.maskc a0, a1, a0
-; CONDOPS-NEXT:    li a2, 42
-; CONDOPS-NEXT:    and a1, a1, a2
-; CONDOPS-NEXT:    or a0, a1, a0
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_and_3:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    bnez a0, .LBB24_2
+; CHECK32-NEXT:  # %bb.1: # %entry
+; CHECK32-NEXT:    andi a1, a1, 42
+; CHECK32-NEXT:  .LBB24_2: # %entry
+; CHECK32-NEXT:    mv a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_and_3:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    bnez a0, .LBB24_2
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    andi a1, a1, 42
+; RV64IM-NEXT:  .LBB24_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_and_3:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    li a2, 42
+; RV64IMXVTCONDOPS-NEXT:    and a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = and i32 %a, 42
   %res = select i1 %cond, i32 %a, i32 %c
@@ -767,31 +767,31 @@ entry:
 }
 
 define i32 @select_udiv_1(i1 zeroext %cond, i32 %a, i32 %b) {
-; RV32-LABEL: select_udiv_1:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    beqz a0, .LBB25_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    divu a2, a1, a2
-; RV32-NEXT:  .LBB25_2: # %entry
-; RV32-NEXT:    mv a0, a2
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_udiv_1:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    beqz a0, .LBB25_2
-; NOCONDOPS-NEXT:  # %bb.1:
-; NOCONDOPS-NEXT:    divuw a2, a1, a2
-; NOCONDOPS-NEXT:  .LBB25_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a2
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_udiv_1:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    divuw a1, a1, a2
-; CONDOPS-NEXT:    vt.maskcn a2, a2, a0
-; CONDOPS-NEXT:    vt.maskc a0, a1, a0
-; CONDOPS-NEXT:    or a0, a0, a2
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_udiv_1:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    beqz a0, .LBB25_2
+; CHECK32-NEXT:  # %bb.1:
+; CHECK32-NEXT:    divu a2, a1, a2
+; CHECK32-NEXT:  .LBB25_2: # %entry
+; CHECK32-NEXT:    mv a0, a2
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_udiv_1:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB25_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    divuw a2, a1, a2
+; RV64IM-NEXT:  .LBB25_2: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_udiv_1:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    divuw a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a2, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a2
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = udiv i32 %a, %b
   %res = select i1 %cond, i32 %c, i32 %b
@@ -799,31 +799,31 @@ entry:
 }
 
 define i32 @select_udiv_2(i1 zeroext %cond, i32 %a, i32 %b) {
-; RV32-LABEL: select_udiv_2:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    bnez a0, .LBB26_2
-; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    divu a1, a1, a2
-; RV32-NEXT:  .LBB26_2: # %entry
-; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_udiv_2:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    bnez a0, .LBB26_2
-; NOCONDOPS-NEXT:  # %bb.1: # %entry
-; NOCONDOPS-NEXT:    divuw a1, a1, a2
-; NOCONDOPS-NEXT:  .LBB26_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_udiv_2:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    divuw a2, a1, a2
-; CONDOPS-NEXT:    vt.maskc a1, a1, a0
-; CONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; CONDOPS-NEXT:    or a0, a1, a0
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_udiv_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    bnez a0, .LBB26_2
+; CHECK32-NEXT:  # %bb.1: # %entry
+; CHECK32-NEXT:    divu a1, a1, a2
+; CHECK32-NEXT:  .LBB26_2: # %entry
+; CHECK32-NEXT:    mv a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_udiv_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    bnez a0, .LBB26_2
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    divuw a1, a1, a2
+; RV64IM-NEXT:  .LBB26_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_udiv_2:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    divuw a2, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = udiv i32 %a, %b
   %res = select i1 %cond, i32 %a, i32 %c
@@ -831,43 +831,43 @@ entry:
 }
 
 define i32 @select_udiv_3(i1 zeroext %cond, i32 %a) {
-; RV32-LABEL: select_udiv_3:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    bnez a0, .LBB27_2
-; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    srli a1, a1, 1
-; RV32-NEXT:    lui a0, 199729
-; RV32-NEXT:    addi a0, a0, -975
-; RV32-NEXT:    mulhu a1, a1, a0
-; RV32-NEXT:    srli a1, a1, 2
-; RV32-NEXT:  .LBB27_2: # %entry
-; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_udiv_3:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    bnez a0, .LBB27_2
-; NOCONDOPS-NEXT:  # %bb.1: # %entry
-; NOCONDOPS-NEXT:    srliw a0, a1, 1
-; NOCONDOPS-NEXT:    lui a1, 199729
-; NOCONDOPS-NEXT:    addiw a1, a1, -975
-; NOCONDOPS-NEXT:    mul a1, a0, a1
-; NOCONDOPS-NEXT:    srli a1, a1, 34
-; NOCONDOPS-NEXT:  .LBB27_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_udiv_3:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    srliw a2, a1, 1
-; CONDOPS-NEXT:    lui a3, 199729
-; CONDOPS-NEXT:    addiw a3, a3, -975
-; CONDOPS-NEXT:    mul a2, a2, a3
-; CONDOPS-NEXT:    srli a2, a2, 34
-; CONDOPS-NEXT:    vt.maskc a1, a1, a0
-; CONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; CONDOPS-NEXT:    or a0, a1, a0
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_udiv_3:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    bnez a0, .LBB27_2
+; CHECK32-NEXT:  # %bb.1: # %entry
+; CHECK32-NEXT:    srli a1, a1, 1
+; CHECK32-NEXT:    lui a0, 199729
+; CHECK32-NEXT:    addi a0, a0, -975
+; CHECK32-NEXT:    mulhu a1, a1, a0
+; CHECK32-NEXT:    srli a1, a1, 2
+; CHECK32-NEXT:  .LBB27_2: # %entry
+; CHECK32-NEXT:    mv a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_udiv_3:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    bnez a0, .LBB27_2
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    srliw a0, a1, 1
+; RV64IM-NEXT:    lui a1, 199729
+; RV64IM-NEXT:    addiw a1, a1, -975
+; RV64IM-NEXT:    mul a1, a0, a1
+; RV64IM-NEXT:    srli a1, a1, 34
+; RV64IM-NEXT:  .LBB27_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_udiv_3:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    srliw a2, a1, 1
+; RV64IMXVTCONDOPS-NEXT:    lui a3, 199729
+; RV64IMXVTCONDOPS-NEXT:    addiw a3, a3, -975
+; RV64IMXVTCONDOPS-NEXT:    mul a2, a2, a3
+; RV64IMXVTCONDOPS-NEXT:    srli a2, a2, 34
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = udiv i32 %a, 42
   %res = select i1 %cond, i32 %a, i32 %c
@@ -875,31 +875,31 @@ entry:
 }
 
 define i32 @select_shl_1(i1 zeroext %cond, i32 %a, i32 %b) {
-; RV32-LABEL: select_shl_1:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    beqz a0, .LBB28_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    sll a2, a1, a2
-; RV32-NEXT:  .LBB28_2: # %entry
-; RV32-NEXT:    mv a0, a2
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_shl_1:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    beqz a0, .LBB28_2
-; NOCONDOPS-NEXT:  # %bb.1:
-; NOCONDOPS-NEXT:    sllw a2, a1, a2
-; NOCONDOPS-NEXT:  .LBB28_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a2
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_shl_1:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    sllw a1, a1, a2
-; CONDOPS-NEXT:    vt.maskcn a2, a2, a0
-; CONDOPS-NEXT:    vt.maskc a0, a1, a0
-; CONDOPS-NEXT:    or a0, a0, a2
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_shl_1:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    beqz a0, .LBB28_2
+; CHECK32-NEXT:  # %bb.1:
+; CHECK32-NEXT:    sll a2, a1, a2
+; CHECK32-NEXT:  .LBB28_2: # %entry
+; CHECK32-NEXT:    mv a0, a2
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_shl_1:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB28_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    sllw a2, a1, a2
+; RV64IM-NEXT:  .LBB28_2: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_shl_1:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    sllw a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a2, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a2
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = shl i32 %a, %b
   %res = select i1 %cond, i32 %c, i32 %b
@@ -907,31 +907,31 @@ entry:
 }
 
 define i32 @select_shl_2(i1 zeroext %cond, i32 %a, i32 %b) {
-; RV32-LABEL: select_shl_2:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    bnez a0, .LBB29_2
-; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    sll a1, a1, a2
-; RV32-NEXT:  .LBB29_2: # %entry
-; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_shl_2:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    bnez a0, .LBB29_2
-; NOCONDOPS-NEXT:  # %bb.1: # %entry
-; NOCONDOPS-NEXT:    sllw a1, a1, a2
-; NOCONDOPS-NEXT:  .LBB29_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_shl_2:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    sllw a2, a1, a2
-; CONDOPS-NEXT:    vt.maskc a1, a1, a0
-; CONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; CONDOPS-NEXT:    or a0, a1, a0
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_shl_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    bnez a0, .LBB29_2
+; CHECK32-NEXT:  # %bb.1: # %entry
+; CHECK32-NEXT:    sll a1, a1, a2
+; CHECK32-NEXT:  .LBB29_2: # %entry
+; CHECK32-NEXT:    mv a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_shl_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    bnez a0, .LBB29_2
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    sllw a1, a1, a2
+; RV64IM-NEXT:  .LBB29_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_shl_2:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    sllw a2, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = shl i32 %a, %b
   %res = select i1 %cond, i32 %a, i32 %c
@@ -950,31 +950,31 @@ entry:
 }
 
 define i32 @select_ashr_1(i1 zeroext %cond, i32 %a, i32 %b) {
-; RV32-LABEL: select_ashr_1:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    beqz a0, .LBB31_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    sra a2, a1, a2
-; RV32-NEXT:  .LBB31_2: # %entry
-; RV32-NEXT:    mv a0, a2
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_ashr_1:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    beqz a0, .LBB31_2
-; NOCONDOPS-NEXT:  # %bb.1:
-; NOCONDOPS-NEXT:    sraw a2, a1, a2
-; NOCONDOPS-NEXT:  .LBB31_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a2
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_ashr_1:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    sraw a1, a1, a2
-; CONDOPS-NEXT:    vt.maskcn a2, a2, a0
-; CONDOPS-NEXT:    vt.maskc a0, a1, a0
-; CONDOPS-NEXT:    or a0, a0, a2
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_ashr_1:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    beqz a0, .LBB31_2
+; CHECK32-NEXT:  # %bb.1:
+; CHECK32-NEXT:    sra a2, a1, a2
+; CHECK32-NEXT:  .LBB31_2: # %entry
+; CHECK32-NEXT:    mv a0, a2
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_ashr_1:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB31_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    sraw a2, a1, a2
+; RV64IM-NEXT:  .LBB31_2: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_ashr_1:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    sraw a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a2, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a2
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = ashr i32 %a, %b
   %res = select i1 %cond, i32 %c, i32 %b
@@ -982,31 +982,31 @@ entry:
 }
 
 define i32 @select_ashr_2(i1 zeroext %cond, i32 %a, i32 %b) {
-; RV32-LABEL: select_ashr_2:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    bnez a0, .LBB32_2
-; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    sra a1, a1, a2
-; RV32-NEXT:  .LBB32_2: # %entry
-; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_ashr_2:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    bnez a0, .LBB32_2
-; NOCONDOPS-NEXT:  # %bb.1: # %entry
-; NOCONDOPS-NEXT:    sraw a1, a1, a2
-; NOCONDOPS-NEXT:  .LBB32_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_ashr_2:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    sraw a2, a1, a2
-; CONDOPS-NEXT:    vt.maskc a1, a1, a0
-; CONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; CONDOPS-NEXT:    or a0, a1, a0
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_ashr_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    bnez a0, .LBB32_2
+; CHECK32-NEXT:  # %bb.1: # %entry
+; CHECK32-NEXT:    sra a1, a1, a2
+; CHECK32-NEXT:  .LBB32_2: # %entry
+; CHECK32-NEXT:    mv a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_ashr_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    bnez a0, .LBB32_2
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    sraw a1, a1, a2
+; RV64IM-NEXT:  .LBB32_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_ashr_2:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    sraw a2, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = ashr i32 %a, %b
   %res = select i1 %cond, i32 %a, i32 %c
@@ -1025,31 +1025,31 @@ entry:
 }
 
 define i32 @select_lshr_1(i1 zeroext %cond, i32 %a, i32 %b) {
-; RV32-LABEL: select_lshr_1:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    beqz a0, .LBB34_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    srl a2, a1, a2
-; RV32-NEXT:  .LBB34_2: # %entry
-; RV32-NEXT:    mv a0, a2
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_lshr_1:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    beqz a0, .LBB34_2
-; NOCONDOPS-NEXT:  # %bb.1:
-; NOCONDOPS-NEXT:    srlw a2, a1, a2
-; NOCONDOPS-NEXT:  .LBB34_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a2
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_lshr_1:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    srlw a1, a1, a2
-; CONDOPS-NEXT:    vt.maskcn a2, a2, a0
-; CONDOPS-NEXT:    vt.maskc a0, a1, a0
-; CONDOPS-NEXT:    or a0, a0, a2
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_lshr_1:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    beqz a0, .LBB34_2
+; CHECK32-NEXT:  # %bb.1:
+; CHECK32-NEXT:    srl a2, a1, a2
+; CHECK32-NEXT:  .LBB34_2: # %entry
+; CHECK32-NEXT:    mv a0, a2
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_lshr_1:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB34_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    srlw a2, a1, a2
+; RV64IM-NEXT:  .LBB34_2: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_lshr_1:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    srlw a1, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a2, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a2
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = lshr i32 %a, %b
   %res = select i1 %cond, i32 %c, i32 %b
@@ -1057,31 +1057,31 @@ entry:
 }
 
 define i32 @select_lshr_2(i1 zeroext %cond, i32 %a, i32 %b) {
-; RV32-LABEL: select_lshr_2:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    bnez a0, .LBB35_2
-; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    srl a1, a1, a2
-; RV32-NEXT:  .LBB35_2: # %entry
-; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:    ret
-;
-; NOCONDOPS-LABEL: select_lshr_2:
-; NOCONDOPS:       # %bb.0: # %entry
-; NOCONDOPS-NEXT:    bnez a0, .LBB35_2
-; NOCONDOPS-NEXT:  # %bb.1: # %entry
-; NOCONDOPS-NEXT:    srlw a1, a1, a2
-; NOCONDOPS-NEXT:  .LBB35_2: # %entry
-; NOCONDOPS-NEXT:    mv a0, a1
-; NOCONDOPS-NEXT:    ret
-;
-; CONDOPS-LABEL: select_lshr_2:
-; CONDOPS:       # %bb.0: # %entry
-; CONDOPS-NEXT:    srlw a2, a1, a2
-; CONDOPS-NEXT:    vt.maskc a1, a1, a0
-; CONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; CONDOPS-NEXT:    or a0, a1, a0
-; CONDOPS-NEXT:    ret
+; CHECK32-LABEL: select_lshr_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    bnez a0, .LBB35_2
+; CHECK32-NEXT:  # %bb.1: # %entry
+; CHECK32-NEXT:    srl a1, a1, a2
+; CHECK32-NEXT:  .LBB35_2: # %entry
+; CHECK32-NEXT:    mv a0, a1
+; CHECK32-NEXT:    ret
+;
+; RV64IM-LABEL: select_lshr_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    bnez a0, .LBB35_2
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    srlw a1, a1, a2
+; RV64IM-NEXT:  .LBB35_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_lshr_2:
+; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    srlw a2, a1, a2
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    ret
 entry:
   %c = lshr i32 %a, %b
   %res = select i1 %cond, i32 %a, i32 %c
@@ -1098,3 +1098,5 @@ entry:
   %res = select i1 %cond, i32 %a, i32 %c
   ret i32 %res
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32IM: {{.*}}

More information about the llvm-commits mailing list