[llvm] 1355458 - [RISCV] Move Shift Ones instructions from Zbb to Zbp to match 0.93 bitmanip spec.

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Fri Jan 22 12:51:04 PST 2021


Author: Craig Topper
Date: 2021-01-22T12:49:10-08:00
New Revision: 1355458ef665b3044e3dfb57acf0c2e7439560fe

URL: https://github.com/llvm/llvm-project/commit/1355458ef665b3044e3dfb57acf0c2e7439560fe
DIFF: https://github.com/llvm/llvm-project/commit/1355458ef665b3044e3dfb57acf0c2e7439560fe.diff

LOG: [RISCV] Move Shift Ones instructions from Zbb to Zbp to match 0.93 bitmanip spec.

It's not really clear in the spec that these are in Zbp now, but
that's what I've gathered from previous commits to the spec. I've
filed an issue to get it documented properly.

Reviewed By: asb, frasercrmck

Differential Revision: https://reviews.llvm.org/D94652
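
For context, a rough C model of the shift-ones semantics involved (an
illustrative sketch matching the selection patterns in the TableGen
change below; the function names and the explicit shift-amount masking
are mine, not part of the commit):

  #include <stdint.h>

  /* slo: shift left, filling the vacated low bits with ones;
     the DAG pattern below is not(shl(not rs1, rs2)). */
  static inline uint32_t slo32(uint32_t rs1, uint32_t rs2) {
      return ~(~rs1 << (rs2 & 31));
  }

  /* sro: shift right, filling the vacated high bits with ones;
     the DAG pattern below is not(srl(not rs1, rs2)). */
  static inline uint32_t sro32(uint32_t rs1, uint32_t rs2) {
      return ~(~rs1 >> (rs2 & 31));
  }

sloi and sroi are the same operations with an immediate shift amount.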

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoB.td
    llvm/test/CodeGen/RISCV/rv32Zbb.ll
    llvm/test/CodeGen/RISCV/rv32Zbp.ll
    llvm/test/CodeGen/RISCV/rv64Zbb.ll
    llvm/test/CodeGen/RISCV/rv64Zbp.ll
    llvm/test/MC/RISCV/rv32zbb-invalid.s
    llvm/test/MC/RISCV/rv32zbb-valid.s
    llvm/test/MC/RISCV/rv32zbp-invalid.s
    llvm/test/MC/RISCV/rv32zbp-valid.s
    llvm/test/MC/RISCV/rv64zbb-invalid.s
    llvm/test/MC/RISCV/rv64zbb-valid.s
    llvm/test/MC/RISCV/rv64zbp-invalid.s
    llvm/test/MC/RISCV/rv64zbp-valid.s

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
index ef0a29d40893..8ac886228249 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -230,10 +230,10 @@ def SH2ADD : ALU_rr<0b0010000, 0b100, "sh2add">, Sched<[]>;
 def SH3ADD : ALU_rr<0b0010000, 0b110, "sh3add">, Sched<[]>;
 } // Predicates = [HasStdExtZba]
 
-let Predicates = [HasStdExtZbb] in {
+let Predicates = [HasStdExtZbp] in {
 def SLO  : ALU_rr<0b0010000, 0b001, "slo">, Sched<[]>;
 def SRO  : ALU_rr<0b0010000, 0b101, "sro">, Sched<[]>;
-} // Predicates = [HasStdExtZbb]
+} // Predicates = [HasStdExtZbp]
 
 let Predicates = [HasStdExtZbbOrZbp] in {
 def ROL   : ALU_rr<0b0110000, 0b001, "rol">, Sched<[]>;
@@ -252,10 +252,10 @@ def GORC : ALU_rr<0b0010100, 0b101, "gorc">, Sched<[]>;
 def GREV : ALU_rr<0b0110100, 0b101, "grev">, Sched<[]>;
 } // Predicates = [HasStdExtZbp]
 
-let Predicates = [HasStdExtZbb] in {
+let Predicates = [HasStdExtZbp] in {
 def SLOI : RVBShift_ri<0b00100, 0b001, OPC_OP_IMM, "sloi">, Sched<[]>;
 def SROI : RVBShift_ri<0b00100, 0b101, OPC_OP_IMM, "sroi">, Sched<[]>;
-} // Predicates = [HasStdExtZbb]
+} // Predicates = [HasStdExtZbp]
 
 let Predicates = [HasStdExtZbbOrZbp] in
 def RORI  : RVBShift_ri<0b01100, 0b101, OPC_OP_IMM, "rori">, Sched<[]>;
@@ -383,10 +383,10 @@ def SH2ADDUW : ALUW_rr<0b0010000, 0b100, "sh2add.uw">, Sched<[]>;
 def SH3ADDUW : ALUW_rr<0b0010000, 0b110, "sh3add.uw">, Sched<[]>;
 } // Predicates = [HasStdExtZbb, IsRV64]
 
-let Predicates = [HasStdExtZbb, IsRV64] in {
+let Predicates = [HasStdExtZbp, IsRV64] in {
 def SLOW   : ALUW_rr<0b0010000, 0b001, "slow">, Sched<[]>;
 def SROW   : ALUW_rr<0b0010000, 0b101, "srow">, Sched<[]>;
-} // Predicates = [HasStdExtZbb, IsRV64]
+} // Predicates = [HasStdExtZbp, IsRV64]
 
 let Predicates = [HasStdExtZbbOrZbp, IsRV64] in {
 def ROLW  : ALUW_rr<0b0110000, 0b001, "rolw">, Sched<[]>;
@@ -405,10 +405,10 @@ def GORCW  : ALUW_rr<0b0010100, 0b101, "gorcw">, Sched<[]>;
 def GREVW  : ALUW_rr<0b0110100, 0b101, "grevw">, Sched<[]>;
 } // Predicates = [HasStdExtZbp, IsRV64]
 
-let Predicates = [HasStdExtZbb, IsRV64] in {
+let Predicates = [HasStdExtZbp, IsRV64] in {
 def SLOIW  : RVBShiftW_ri<0b0010000, 0b001, OPC_OP_IMM_32, "sloiw">, Sched<[]>;
 def SROIW  : RVBShiftW_ri<0b0010000, 0b101, OPC_OP_IMM_32, "sroiw">, Sched<[]>;
-} // Predicates = [HasStdExtZbb, IsRV64]
+} // Predicates = [HasStdExtZbp, IsRV64]
 
 let Predicates = [HasStdExtZbbOrZbp, IsRV64] in
 def RORIW : RVBShiftW_ri<0b0110000, 0b101, OPC_OP_IMM_32, "roriw">, Sched<[]>;
@@ -637,12 +637,12 @@ def : Pat<(or  GPR:$rs1, (not GPR:$rs2)), (ORN  GPR:$rs1, GPR:$rs2)>;
 def : Pat<(xor GPR:$rs1, (not GPR:$rs2)), (XNOR GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbbOrZbp]
 
-let Predicates = [HasStdExtZbb] in {
+let Predicates = [HasStdExtZbp] in {
 def : Pat<(not (shl (not GPR:$rs1), GPR:$rs2)),
           (SLO GPR:$rs1, GPR:$rs2)>;
 def : Pat<(not (srl (not GPR:$rs1), GPR:$rs2)),
           (SRO GPR:$rs1, GPR:$rs2)>;
-} // Predicates = [HasStdExtZbb]
+} // Predicates = [HasStdExtZbp]
 
 let Predicates = [HasStdExtZbbOrZbp] in {
 def : Pat<(rotl GPR:$rs1, GPR:$rs2), (ROL GPR:$rs1, GPR:$rs2)>;
@@ -671,12 +671,12 @@ def : Pat<(xor GPR:$rs1, SBSETINVMask:$mask),
           (SBINVI GPR:$rs1, (SBSETINVXForm imm:$mask))>;
 }
 
-let Predicates = [HasStdExtZbb] in {
+let Predicates = [HasStdExtZbp] in {
 def : Pat<(SLOIPat GPR:$rs1, uimmlog2xlen:$shamt),
           (SLOI GPR:$rs1, uimmlog2xlen:$shamt)>;
 def : Pat<(SROIPat GPR:$rs1, uimmlog2xlen:$shamt),
           (SROI GPR:$rs1, uimmlog2xlen:$shamt)>;
-} // Predicates = [HasStdExtZbb]
+} // Predicates = [HasStdExtZbp]
 
 // There's no encoding for roli in the current version of the 'B' extension
 // (v0.92) as it can be implemented with rori by negating the immediate.
@@ -856,12 +856,12 @@ def : Pat<(add GPR:$rs1, (and GPR:$rs2, (i64 0xFFFFFFFF))),
           (ADDUW GPR:$rs1, GPR:$rs2)>;
 }
 
-let Predicates = [HasStdExtZbb, IsRV64] in {
+let Predicates = [HasStdExtZbp, IsRV64] in {
 def : Pat<(not (riscv_sllw (not GPR:$rs1), GPR:$rs2)),
           (SLOW GPR:$rs1, GPR:$rs2)>;
 def : Pat<(not (riscv_srlw (not GPR:$rs1), GPR:$rs2)),
           (SROW GPR:$rs1, GPR:$rs2)>;
-} // Predicates = [HasStdExtZbb, IsRV64]
+} // Predicates = [HasStdExtZbp, IsRV64]
 
 let Predicates = [HasStdExtZbbOrZbp, IsRV64] in {
 def : Pat<(riscv_rolw GPR:$rs1, GPR:$rs2),
@@ -902,12 +902,12 @@ def : Pat<(xor (assertsexti32 GPR:$rs1), SBSETINVWMask:$mask),
 
 } // Predicates = [HasStdExtZbs, IsRV64]
 
-let Predicates = [HasStdExtZbb, IsRV64] in {
+let Predicates = [HasStdExtZbp, IsRV64] in {
 def : Pat<(sext_inreg (SLOIPat GPR:$rs1, uimm5:$shamt), i32),
           (SLOIW GPR:$rs1, uimm5:$shamt)>;
 def : Pat<(SROIWPat GPR:$rs1, uimm5:$shamt),
           (SROIW GPR:$rs1, uimm5:$shamt)>;
-} // Predicates = [HasStdExtZbb, IsRV64]
+} // Predicates = [HasStdExtZbp, IsRV64]
 
 let Predicates = [HasStdExtZbp, IsRV64] in {
 def : Pat<(riscv_rorw (riscv_greviw GPR:$rs1, 24), (i64 16)), (GREVIW GPR:$rs1, 8)>;
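
Regarding the roli note in the hunk above: rotate-left-immediate needs
no encoding because it reduces to rori. A C sketch of the equivalence
(illustrative, not from the commit):

  #include <stdint.h>

  static inline uint32_t rori32(uint32_t x, unsigned k) {
      return (x >> (k & 31)) | (x << ((32 - k) & 31));
  }

  /* roli(x, k) is just rori(x, (XLEN - k) mod XLEN). */
  static inline uint32_t roli32(uint32_t x, unsigned k) {
      return rori32(x, (32 - k) & 31);
  }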

diff  --git a/llvm/test/CodeGen/RISCV/rv32Zbb.ll b/llvm/test/CodeGen/RISCV/rv32Zbb.ll
index 9e448d67cfa7..8d03d733114d 100644
--- a/llvm/test/CodeGen/RISCV/rv32Zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32Zbb.ll
@@ -6,308 +6,6 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbb -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32IBB
 
-define i32 @slo_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: slo_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    sll a0, a0, a1
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    ret
-;
-; RV32IB-LABEL: slo_i32:
-; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    slo a0, a0, a1
-; RV32IB-NEXT:    ret
-;
-; RV32IBB-LABEL: slo_i32:
-; RV32IBB:       # %bb.0:
-; RV32IBB-NEXT:    slo a0, a0, a1
-; RV32IBB-NEXT:    ret
-  %neg = xor i32 %a, -1
-  %shl = shl i32 %neg, %b
-  %neg1 = xor i32 %shl, -1
-  ret i32 %neg1
-}
-
-; As we are not matching directly i64 code patterns on RV32 some i64 patterns
-; don't have yet any matching bit manipulation instructions on RV32.
-; This test is presented here in case future expansions of the experimental-b
-; extension introduce instructions suitable for this pattern.
-
-define i64 @slo_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: slo_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a3, a2, -32
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    bltz a3, .LBB1_2
-; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    sll a1, a0, a3
-; RV32I-NEXT:    j .LBB1_3
-; RV32I-NEXT:  .LBB1_2:
-; RV32I-NEXT:    not a1, a1
-; RV32I-NEXT:    sll a1, a1, a2
-; RV32I-NEXT:    addi a3, zero, 31
-; RV32I-NEXT:    sub a3, a3, a2
-; RV32I-NEXT:    srli a4, a0, 1
-; RV32I-NEXT:    srl a3, a4, a3
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    sll a2, a0, a2
-; RV32I-NEXT:  .LBB1_3:
-; RV32I-NEXT:    not a1, a1
-; RV32I-NEXT:    not a0, a2
-; RV32I-NEXT:    ret
-;
-; RV32IB-LABEL: slo_i64:
-; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    not a0, a0
-; RV32IB-NEXT:    not a1, a1
-; RV32IB-NEXT:    sll a1, a1, a2
-; RV32IB-NEXT:    addi a3, zero, 31
-; RV32IB-NEXT:    sub a3, a3, a2
-; RV32IB-NEXT:    srli a4, a0, 1
-; RV32IB-NEXT:    srl a3, a4, a3
-; RV32IB-NEXT:    or a1, a1, a3
-; RV32IB-NEXT:    addi a3, a2, -32
-; RV32IB-NEXT:    sll a4, a0, a3
-; RV32IB-NEXT:    slti a5, a3, 0
-; RV32IB-NEXT:    cmov a1, a5, a1, a4
-; RV32IB-NEXT:    sll a0, a0, a2
-; RV32IB-NEXT:    srai a2, a3, 31
-; RV32IB-NEXT:    and a0, a2, a0
-; RV32IB-NEXT:    not a1, a1
-; RV32IB-NEXT:    not a0, a0
-; RV32IB-NEXT:    ret
-;
-; RV32IBB-LABEL: slo_i64:
-; RV32IBB:       # %bb.0:
-; RV32IBB-NEXT:    addi a3, a2, -32
-; RV32IBB-NEXT:    not a0, a0
-; RV32IBB-NEXT:    bltz a3, .LBB1_2
-; RV32IBB-NEXT:  # %bb.1:
-; RV32IBB-NEXT:    mv a2, zero
-; RV32IBB-NEXT:    sll a1, a0, a3
-; RV32IBB-NEXT:    j .LBB1_3
-; RV32IBB-NEXT:  .LBB1_2:
-; RV32IBB-NEXT:    not a1, a1
-; RV32IBB-NEXT:    sll a1, a1, a2
-; RV32IBB-NEXT:    addi a3, zero, 31
-; RV32IBB-NEXT:    sub a3, a3, a2
-; RV32IBB-NEXT:    srli a4, a0, 1
-; RV32IBB-NEXT:    srl a3, a4, a3
-; RV32IBB-NEXT:    or a1, a1, a3
-; RV32IBB-NEXT:    sll a2, a0, a2
-; RV32IBB-NEXT:  .LBB1_3:
-; RV32IBB-NEXT:    not a1, a1
-; RV32IBB-NEXT:    not a0, a2
-; RV32IBB-NEXT:    ret
-  %neg = xor i64 %a, -1
-  %shl = shl i64 %neg, %b
-  %neg1 = xor i64 %shl, -1
-  ret i64 %neg1
-}
-
-define i32 @sro_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: sro_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    srl a0, a0, a1
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    ret
-;
-; RV32IB-LABEL: sro_i32:
-; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    sro a0, a0, a1
-; RV32IB-NEXT:    ret
-;
-; RV32IBB-LABEL: sro_i32:
-; RV32IBB:       # %bb.0:
-; RV32IBB-NEXT:    sro a0, a0, a1
-; RV32IBB-NEXT:    ret
-  %neg = xor i32 %a, -1
-  %shr = lshr i32 %neg, %b
-  %neg1 = xor i32 %shr, -1
-  ret i32 %neg1
-}
-
-; As we are not matching directly i64 code patterns on RV32 some i64 patterns
-; don't have yet any matching bit manipulation instructions on RV32.
-; This test is presented here in case future expansions of the experimental-b
-; extension introduce instructions suitable for this pattern.
-
-define i64 @sro_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: sro_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a3, a2, -32
-; RV32I-NEXT:    not a1, a1
-; RV32I-NEXT:    bltz a3, .LBB3_2
-; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    srl a0, a1, a3
-; RV32I-NEXT:    j .LBB3_3
-; RV32I-NEXT:  .LBB3_2:
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    srl a0, a0, a2
-; RV32I-NEXT:    addi a3, zero, 31
-; RV32I-NEXT:    sub a3, a3, a2
-; RV32I-NEXT:    slli a4, a1, 1
-; RV32I-NEXT:    sll a3, a4, a3
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    srl a2, a1, a2
-; RV32I-NEXT:  .LBB3_3:
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    not a1, a2
-; RV32I-NEXT:    ret
-;
-; RV32IB-LABEL: sro_i64:
-; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    not a1, a1
-; RV32IB-NEXT:    not a0, a0
-; RV32IB-NEXT:    srl a0, a0, a2
-; RV32IB-NEXT:    addi a3, zero, 31
-; RV32IB-NEXT:    sub a3, a3, a2
-; RV32IB-NEXT:    slli a4, a1, 1
-; RV32IB-NEXT:    sll a3, a4, a3
-; RV32IB-NEXT:    or a0, a0, a3
-; RV32IB-NEXT:    addi a3, a2, -32
-; RV32IB-NEXT:    srl a4, a1, a3
-; RV32IB-NEXT:    slti a5, a3, 0
-; RV32IB-NEXT:    cmov a0, a5, a0, a4
-; RV32IB-NEXT:    srl a1, a1, a2
-; RV32IB-NEXT:    srai a2, a3, 31
-; RV32IB-NEXT:    and a1, a2, a1
-; RV32IB-NEXT:    not a0, a0
-; RV32IB-NEXT:    not a1, a1
-; RV32IB-NEXT:    ret
-;
-; RV32IBB-LABEL: sro_i64:
-; RV32IBB:       # %bb.0:
-; RV32IBB-NEXT:    addi a3, a2, -32
-; RV32IBB-NEXT:    not a1, a1
-; RV32IBB-NEXT:    bltz a3, .LBB3_2
-; RV32IBB-NEXT:  # %bb.1:
-; RV32IBB-NEXT:    mv a2, zero
-; RV32IBB-NEXT:    srl a0, a1, a3
-; RV32IBB-NEXT:    j .LBB3_3
-; RV32IBB-NEXT:  .LBB3_2:
-; RV32IBB-NEXT:    not a0, a0
-; RV32IBB-NEXT:    srl a0, a0, a2
-; RV32IBB-NEXT:    addi a3, zero, 31
-; RV32IBB-NEXT:    sub a3, a3, a2
-; RV32IBB-NEXT:    slli a4, a1, 1
-; RV32IBB-NEXT:    sll a3, a4, a3
-; RV32IBB-NEXT:    or a0, a0, a3
-; RV32IBB-NEXT:    srl a2, a1, a2
-; RV32IBB-NEXT:  .LBB3_3:
-; RV32IBB-NEXT:    not a0, a0
-; RV32IBB-NEXT:    not a1, a2
-; RV32IBB-NEXT:    ret
-  %neg = xor i64 %a, -1
-  %shr = lshr i64 %neg, %b
-  %neg1 = xor i64 %shr, -1
-  ret i64 %neg1
-}
-
-define i32 @sloi_i32(i32 %a) nounwind {
-; RV32I-LABEL: sloi_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    ori a0, a0, 1
-; RV32I-NEXT:    ret
-;
-; RV32IB-LABEL: sloi_i32:
-; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    sloi a0, a0, 1
-; RV32IB-NEXT:    ret
-;
-; RV32IBB-LABEL: sloi_i32:
-; RV32IBB:       # %bb.0:
-; RV32IBB-NEXT:    sloi a0, a0, 1
-; RV32IBB-NEXT:    ret
-  %neg = shl i32 %a, 1
-  %neg12 = or i32 %neg, 1
-  ret i32 %neg12
-}
-
-define i64 @sloi_i64(i64 %a) nounwind {
-; RV32I-LABEL: sloi_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a2, a0, 31
-; RV32I-NEXT:    slli a1, a1, 1
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    ori a0, a0, 1
-; RV32I-NEXT:    ret
-;
-; RV32IB-LABEL: sloi_i64:
-; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    fsri a1, a0, a1, 31
-; RV32IB-NEXT:    sloi a0, a0, 1
-; RV32IB-NEXT:    ret
-;
-; RV32IBB-LABEL: sloi_i64:
-; RV32IBB:       # %bb.0:
-; RV32IBB-NEXT:    srli a2, a0, 31
-; RV32IBB-NEXT:    slli a1, a1, 1
-; RV32IBB-NEXT:    or a1, a1, a2
-; RV32IBB-NEXT:    sloi a0, a0, 1
-; RV32IBB-NEXT:    ret
-  %neg = shl i64 %a, 1
-  %neg12 = or i64 %neg, 1
-  ret i64 %neg12
-}
-
-define i32 @sroi_i32(i32 %a) nounwind {
-; RV32I-LABEL: sroi_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32IB-LABEL: sroi_i32:
-; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    sroi a0, a0, 1
-; RV32IB-NEXT:    ret
-;
-; RV32IBB-LABEL: sroi_i32:
-; RV32IBB:       # %bb.0:
-; RV32IBB-NEXT:    sroi a0, a0, 1
-; RV32IBB-NEXT:    ret
-  %neg = lshr i32 %a, 1
-  %neg12 = or i32 %neg, -2147483648
-  ret i32 %neg12
-}
-
-define i64 @sroi_i64(i64 %a) nounwind {
-; RV32I-LABEL: sroi_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a1, 31
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    srli a1, a1, 1
-; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    ret
-;
-; RV32IB-LABEL: sroi_i64:
-; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    fsri a0, a0, a1, 1
-; RV32IB-NEXT:    sroi a1, a1, 1
-; RV32IB-NEXT:    ret
-;
-; RV32IBB-LABEL: sroi_i64:
-; RV32IBB:       # %bb.0:
-; RV32IBB-NEXT:    slli a2, a1, 31
-; RV32IBB-NEXT:    srli a0, a0, 1
-; RV32IBB-NEXT:    or a0, a0, a2
-; RV32IBB-NEXT:    sroi a1, a1, 1
-; RV32IBB-NEXT:    ret
-  %neg = lshr i64 %a, 1
-  %neg12 = or i64 %neg, -9223372036854775808
-  ret i64 %neg12
-}
-
 declare i32 @llvm.ctlz.i32(i32, i1)
 
 define i32 @ctlz_i32(i32 %a) nounwind {
@@ -315,7 +13,7 @@ define i32 @ctlz_i32(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    beqz a0, .LBB8_2
+; RV32I-NEXT:    beqz a0, .LBB0_2
 ; RV32I-NEXT:  # %bb.1: # %cond.false
 ; RV32I-NEXT:    srli a1, a0, 1
 ; RV32I-NEXT:    or a0, a0, a1
@@ -348,10 +46,10 @@ define i32 @ctlz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    addi a1, a1, 257
 ; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    j .LBB8_3
-; RV32I-NEXT:  .LBB8_2:
+; RV32I-NEXT:    j .LBB0_3
+; RV32I-NEXT:  .LBB0_2:
 ; RV32I-NEXT:    addi a0, zero, 32
-; RV32I-NEXT:  .LBB8_3: # %cond.end
+; RV32I-NEXT:  .LBB0_3: # %cond.end
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -440,14 +138,14 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, s6
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    bnez s3, .LBB9_2
+; RV32I-NEXT:    bnez s3, .LBB1_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    addi a0, a0, 32
-; RV32I-NEXT:    j .LBB9_3
-; RV32I-NEXT:  .LBB9_2:
+; RV32I-NEXT:    j .LBB1_3
+; RV32I-NEXT:  .LBB1_2:
 ; RV32I-NEXT:    srli a0, s2, 24
-; RV32I-NEXT:  .LBB9_3:
+; RV32I-NEXT:  .LBB1_3:
 ; RV32I-NEXT:    mv a1, zero
 ; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
@@ -471,13 +169,13 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ;
 ; RV32IBB-LABEL: ctlz_i64:
 ; RV32IBB:       # %bb.0:
-; RV32IBB-NEXT:    bnez a1, .LBB9_2
+; RV32IBB-NEXT:    bnez a1, .LBB1_2
 ; RV32IBB-NEXT:  # %bb.1:
 ; RV32IBB-NEXT:    clz a0, a0
 ; RV32IBB-NEXT:    addi a0, a0, 32
 ; RV32IBB-NEXT:    mv a1, zero
 ; RV32IBB-NEXT:    ret
-; RV32IBB-NEXT:  .LBB9_2:
+; RV32IBB-NEXT:  .LBB1_2:
 ; RV32IBB-NEXT:    clz a0, a1
 ; RV32IBB-NEXT:    mv a1, zero
 ; RV32IBB-NEXT:    ret
@@ -492,7 +190,7 @@ define i32 @cttz_i32(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    beqz a0, .LBB10_2
+; RV32I-NEXT:    beqz a0, .LBB2_2
 ; RV32I-NEXT:  # %bb.1: # %cond.false
 ; RV32I-NEXT:    addi a1, a0, -1
 ; RV32I-NEXT:    not a0, a0
@@ -517,10 +215,10 @@ define i32 @cttz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    addi a1, a1, 257
 ; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    j .LBB10_3
-; RV32I-NEXT:  .LBB10_2:
+; RV32I-NEXT:    j .LBB2_3
+; RV32I-NEXT:  .LBB2_2:
 ; RV32I-NEXT:    addi a0, zero, 32
-; RV32I-NEXT:  .LBB10_3: # %cond.end
+; RV32I-NEXT:  .LBB2_3: # %cond.end
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -593,14 +291,14 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, s6
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    bnez s4, .LBB11_2
+; RV32I-NEXT:    bnez s4, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    addi a0, a0, 32
-; RV32I-NEXT:    j .LBB11_3
-; RV32I-NEXT:  .LBB11_2:
+; RV32I-NEXT:    j .LBB3_3
+; RV32I-NEXT:  .LBB3_2:
 ; RV32I-NEXT:    srli a0, s2, 24
-; RV32I-NEXT:  .LBB11_3:
+; RV32I-NEXT:  .LBB3_3:
 ; RV32I-NEXT:    mv a1, zero
 ; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
@@ -624,13 +322,13 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ;
 ; RV32IBB-LABEL: cttz_i64:
 ; RV32IBB:       # %bb.0:
-; RV32IBB-NEXT:    bnez a0, .LBB11_2
+; RV32IBB-NEXT:    bnez a0, .LBB3_2
 ; RV32IBB-NEXT:  # %bb.1:
 ; RV32IBB-NEXT:    ctz a0, a1
 ; RV32IBB-NEXT:    addi a0, a0, 32
 ; RV32IBB-NEXT:    mv a1, zero
 ; RV32IBB-NEXT:    ret
-; RV32IBB-NEXT:  .LBB11_2:
+; RV32IBB-NEXT:  .LBB3_2:
 ; RV32IBB-NEXT:    ctz a0, a0
 ; RV32IBB-NEXT:    mv a1, zero
 ; RV32IBB-NEXT:    ret
@@ -854,10 +552,10 @@ define i64 @sexth_i64(i64 %a) nounwind {
 define i32 @min_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: min_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    blt a0, a1, .LBB18_2
+; RV32I-NEXT:    blt a0, a1, .LBB10_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:  .LBB18_2:
+; RV32I-NEXT:  .LBB10_2:
 ; RV32I-NEXT:    ret
 ;
 ; RV32IB-LABEL: min_i32:
@@ -882,18 +580,18 @@ define i32 @min_i32(i32 %a, i32 %b) nounwind {
 define i64 @min_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: min_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beq a1, a3, .LBB19_2
+; RV32I-NEXT:    beq a1, a3, .LBB11_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    slt a4, a1, a3
-; RV32I-NEXT:    beqz a4, .LBB19_3
-; RV32I-NEXT:    j .LBB19_4
-; RV32I-NEXT:  .LBB19_2:
+; RV32I-NEXT:    beqz a4, .LBB11_3
+; RV32I-NEXT:    j .LBB11_4
+; RV32I-NEXT:  .LBB11_2:
 ; RV32I-NEXT:    sltu a4, a0, a2
-; RV32I-NEXT:    bnez a4, .LBB19_4
-; RV32I-NEXT:  .LBB19_3:
+; RV32I-NEXT:    bnez a4, .LBB11_4
+; RV32I-NEXT:  .LBB11_3:
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    mv a1, a3
-; RV32I-NEXT:  .LBB19_4:
+; RV32I-NEXT:  .LBB11_4:
 ; RV32I-NEXT:    ret
 ;
 ; RV32IB-LABEL: min_i64:
@@ -909,16 +607,16 @@ define i64 @min_i64(i64 %a, i64 %b) nounwind {
 ; RV32IBB-LABEL: min_i64:
 ; RV32IBB:       # %bb.0:
 ; RV32IBB-NEXT:    mv a4, a0
-; RV32IBB-NEXT:    bge a1, a3, .LBB19_3
+; RV32IBB-NEXT:    bge a1, a3, .LBB11_3
 ; RV32IBB-NEXT:  # %bb.1:
-; RV32IBB-NEXT:    beq a1, a3, .LBB19_4
-; RV32IBB-NEXT:  .LBB19_2:
+; RV32IBB-NEXT:    beq a1, a3, .LBB11_4
+; RV32IBB-NEXT:  .LBB11_2:
 ; RV32IBB-NEXT:    min a1, a1, a3
 ; RV32IBB-NEXT:    ret
-; RV32IBB-NEXT:  .LBB19_3:
+; RV32IBB-NEXT:  .LBB11_3:
 ; RV32IBB-NEXT:    mv a0, a2
-; RV32IBB-NEXT:    bne a1, a3, .LBB19_2
-; RV32IBB-NEXT:  .LBB19_4:
+; RV32IBB-NEXT:    bne a1, a3, .LBB11_2
+; RV32IBB-NEXT:  .LBB11_4:
 ; RV32IBB-NEXT:    minu a0, a4, a2
 ; RV32IBB-NEXT:    min a1, a1, a3
 ; RV32IBB-NEXT:    ret
@@ -930,10 +628,10 @@ define i64 @min_i64(i64 %a, i64 %b) nounwind {
 define i32 @max_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: max_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    blt a1, a0, .LBB20_2
+; RV32I-NEXT:    blt a1, a0, .LBB12_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:  .LBB20_2:
+; RV32I-NEXT:  .LBB12_2:
 ; RV32I-NEXT:    ret
 ;
 ; RV32IB-LABEL: max_i32:
@@ -958,18 +656,18 @@ define i32 @max_i32(i32 %a, i32 %b) nounwind {
 define i64 @max_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: max_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beq a1, a3, .LBB21_2
+; RV32I-NEXT:    beq a1, a3, .LBB13_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    slt a4, a3, a1
-; RV32I-NEXT:    beqz a4, .LBB21_3
-; RV32I-NEXT:    j .LBB21_4
-; RV32I-NEXT:  .LBB21_2:
+; RV32I-NEXT:    beqz a4, .LBB13_3
+; RV32I-NEXT:    j .LBB13_4
+; RV32I-NEXT:  .LBB13_2:
 ; RV32I-NEXT:    sltu a4, a2, a0
-; RV32I-NEXT:    bnez a4, .LBB21_4
-; RV32I-NEXT:  .LBB21_3:
+; RV32I-NEXT:    bnez a4, .LBB13_4
+; RV32I-NEXT:  .LBB13_3:
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    mv a1, a3
-; RV32I-NEXT:  .LBB21_4:
+; RV32I-NEXT:  .LBB13_4:
 ; RV32I-NEXT:    ret
 ;
 ; RV32IB-LABEL: max_i64:
@@ -985,16 +683,16 @@ define i64 @max_i64(i64 %a, i64 %b) nounwind {
 ; RV32IBB-LABEL: max_i64:
 ; RV32IBB:       # %bb.0:
 ; RV32IBB-NEXT:    mv a4, a0
-; RV32IBB-NEXT:    bge a3, a1, .LBB21_3
+; RV32IBB-NEXT:    bge a3, a1, .LBB13_3
 ; RV32IBB-NEXT:  # %bb.1:
-; RV32IBB-NEXT:    beq a1, a3, .LBB21_4
-; RV32IBB-NEXT:  .LBB21_2:
+; RV32IBB-NEXT:    beq a1, a3, .LBB13_4
+; RV32IBB-NEXT:  .LBB13_2:
 ; RV32IBB-NEXT:    max a1, a1, a3
 ; RV32IBB-NEXT:    ret
-; RV32IBB-NEXT:  .LBB21_3:
+; RV32IBB-NEXT:  .LBB13_3:
 ; RV32IBB-NEXT:    mv a0, a2
-; RV32IBB-NEXT:    bne a1, a3, .LBB21_2
-; RV32IBB-NEXT:  .LBB21_4:
+; RV32IBB-NEXT:    bne a1, a3, .LBB13_2
+; RV32IBB-NEXT:  .LBB13_4:
 ; RV32IBB-NEXT:    maxu a0, a4, a2
 ; RV32IBB-NEXT:    max a1, a1, a3
 ; RV32IBB-NEXT:    ret
@@ -1006,10 +704,10 @@ define i64 @max_i64(i64 %a, i64 %b) nounwind {
 define i32 @minu_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: minu_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    bltu a0, a1, .LBB22_2
+; RV32I-NEXT:    bltu a0, a1, .LBB14_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:  .LBB22_2:
+; RV32I-NEXT:  .LBB14_2:
 ; RV32I-NEXT:    ret
 ;
 ; RV32IB-LABEL: minu_i32:
@@ -1034,18 +732,18 @@ define i32 @minu_i32(i32 %a, i32 %b) nounwind {
 define i64 @minu_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: minu_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beq a1, a3, .LBB23_2
+; RV32I-NEXT:    beq a1, a3, .LBB15_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sltu a4, a1, a3
-; RV32I-NEXT:    beqz a4, .LBB23_3
-; RV32I-NEXT:    j .LBB23_4
-; RV32I-NEXT:  .LBB23_2:
+; RV32I-NEXT:    beqz a4, .LBB15_3
+; RV32I-NEXT:    j .LBB15_4
+; RV32I-NEXT:  .LBB15_2:
 ; RV32I-NEXT:    sltu a4, a0, a2
-; RV32I-NEXT:    bnez a4, .LBB23_4
-; RV32I-NEXT:  .LBB23_3:
+; RV32I-NEXT:    bnez a4, .LBB15_4
+; RV32I-NEXT:  .LBB15_3:
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    mv a1, a3
-; RV32I-NEXT:  .LBB23_4:
+; RV32I-NEXT:  .LBB15_4:
 ; RV32I-NEXT:    ret
 ;
 ; RV32IB-LABEL: minu_i64:
@@ -1061,16 +759,16 @@ define i64 @minu_i64(i64 %a, i64 %b) nounwind {
 ; RV32IBB-LABEL: minu_i64:
 ; RV32IBB:       # %bb.0:
 ; RV32IBB-NEXT:    mv a4, a0
-; RV32IBB-NEXT:    bgeu a1, a3, .LBB23_3
+; RV32IBB-NEXT:    bgeu a1, a3, .LBB15_3
 ; RV32IBB-NEXT:  # %bb.1:
-; RV32IBB-NEXT:    beq a1, a3, .LBB23_4
-; RV32IBB-NEXT:  .LBB23_2:
+; RV32IBB-NEXT:    beq a1, a3, .LBB15_4
+; RV32IBB-NEXT:  .LBB15_2:
 ; RV32IBB-NEXT:    minu a1, a1, a3
 ; RV32IBB-NEXT:    ret
-; RV32IBB-NEXT:  .LBB23_3:
+; RV32IBB-NEXT:  .LBB15_3:
 ; RV32IBB-NEXT:    mv a0, a2
-; RV32IBB-NEXT:    bne a1, a3, .LBB23_2
-; RV32IBB-NEXT:  .LBB23_4:
+; RV32IBB-NEXT:    bne a1, a3, .LBB15_2
+; RV32IBB-NEXT:  .LBB15_4:
 ; RV32IBB-NEXT:    minu a0, a4, a2
 ; RV32IBB-NEXT:    minu a1, a1, a3
 ; RV32IBB-NEXT:    ret
@@ -1082,10 +780,10 @@ define i64 @minu_i64(i64 %a, i64 %b) nounwind {
 define i32 @maxu_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: maxu_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    bltu a1, a0, .LBB24_2
+; RV32I-NEXT:    bltu a1, a0, .LBB16_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:  .LBB24_2:
+; RV32I-NEXT:  .LBB16_2:
 ; RV32I-NEXT:    ret
 ;
 ; RV32IB-LABEL: maxu_i32:
@@ -1110,18 +808,18 @@ define i32 @maxu_i32(i32 %a, i32 %b) nounwind {
 define i64 @maxu_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: maxu_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beq a1, a3, .LBB25_2
+; RV32I-NEXT:    beq a1, a3, .LBB17_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sltu a4, a3, a1
-; RV32I-NEXT:    beqz a4, .LBB25_3
-; RV32I-NEXT:    j .LBB25_4
-; RV32I-NEXT:  .LBB25_2:
+; RV32I-NEXT:    beqz a4, .LBB17_3
+; RV32I-NEXT:    j .LBB17_4
+; RV32I-NEXT:  .LBB17_2:
 ; RV32I-NEXT:    sltu a4, a2, a0
-; RV32I-NEXT:    bnez a4, .LBB25_4
-; RV32I-NEXT:  .LBB25_3:
+; RV32I-NEXT:    bnez a4, .LBB17_4
+; RV32I-NEXT:  .LBB17_3:
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    mv a1, a3
-; RV32I-NEXT:  .LBB25_4:
+; RV32I-NEXT:  .LBB17_4:
 ; RV32I-NEXT:    ret
 ;
 ; RV32IB-LABEL: maxu_i64:
@@ -1137,16 +835,16 @@ define i64 @maxu_i64(i64 %a, i64 %b) nounwind {
 ; RV32IBB-LABEL: maxu_i64:
 ; RV32IBB:       # %bb.0:
 ; RV32IBB-NEXT:    mv a4, a0
-; RV32IBB-NEXT:    bgeu a3, a1, .LBB25_3
+; RV32IBB-NEXT:    bgeu a3, a1, .LBB17_3
 ; RV32IBB-NEXT:  # %bb.1:
-; RV32IBB-NEXT:    beq a1, a3, .LBB25_4
-; RV32IBB-NEXT:  .LBB25_2:
+; RV32IBB-NEXT:    beq a1, a3, .LBB17_4
+; RV32IBB-NEXT:  .LBB17_2:
 ; RV32IBB-NEXT:    maxu a1, a1, a3
 ; RV32IBB-NEXT:    ret
-; RV32IBB-NEXT:  .LBB25_3:
+; RV32IBB-NEXT:  .LBB17_3:
 ; RV32IBB-NEXT:    mv a0, a2
-; RV32IBB-NEXT:    bne a1, a3, .LBB25_2
-; RV32IBB-NEXT:  .LBB25_4:
+; RV32IBB-NEXT:    bne a1, a3, .LBB17_2
+; RV32IBB-NEXT:  .LBB17_4:
 ; RV32IBB-NEXT:    maxu a0, a4, a2
 ; RV32IBB-NEXT:    maxu a1, a1, a3
 ; RV32IBB-NEXT:    ret
@@ -1185,13 +883,13 @@ declare i64 @llvm.abs.i64(i64, i1 immarg)
 define i64 @abs_i64(i64 %x) {
 ; RV32I-LABEL: abs_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    bgez a1, .LBB27_2
+; RV32I-NEXT:    bgez a1, .LBB19_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    snez a2, a0
 ; RV32I-NEXT:    neg a0, a0
 ; RV32I-NEXT:    add a1, a1, a2
 ; RV32I-NEXT:    neg a1, a1
-; RV32I-NEXT:  .LBB27_2:
+; RV32I-NEXT:  .LBB19_2:
 ; RV32I-NEXT:    ret
 ;
 ; RV32IB-LABEL: abs_i64:
@@ -1208,13 +906,13 @@ define i64 @abs_i64(i64 %x) {
 ;
 ; RV32IBB-LABEL: abs_i64:
 ; RV32IBB:       # %bb.0:
-; RV32IBB-NEXT:    bgez a1, .LBB27_2
+; RV32IBB-NEXT:    bgez a1, .LBB19_2
 ; RV32IBB-NEXT:  # %bb.1:
 ; RV32IBB-NEXT:    snez a2, a0
 ; RV32IBB-NEXT:    neg a0, a0
 ; RV32IBB-NEXT:    add a1, a1, a2
 ; RV32IBB-NEXT:    neg a1, a1
-; RV32IBB-NEXT:  .LBB27_2:
+; RV32IBB-NEXT:  .LBB19_2:
 ; RV32IBB-NEXT:    ret
   %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
   ret i64 %abs

diff  --git a/llvm/test/CodeGen/RISCV/rv32Zbp.ll b/llvm/test/CodeGen/RISCV/rv32Zbp.ll
index 0ca3a2b10b1b..1ad9fc69b7c7 100644
--- a/llvm/test/CodeGen/RISCV/rv32Zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv32Zbp.ll
@@ -6,6 +6,308 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbp -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32IBP
 
+define i32 @slo_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: slo_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: slo_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    slo a0, a0, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: slo_i32:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    slo a0, a0, a1
+; RV32IBP-NEXT:    ret
+  %neg = xor i32 %a, -1
+  %shl = shl i32 %neg, %b
+  %neg1 = xor i32 %shl, -1
+  ret i32 %neg1
+}
+
+; As we are not matching directly i64 code patterns on RV32 some i64 patterns
+; don't have yet any matching bit manipulation instructions on RV32.
+; This test is presented here in case future expansions of the experimental-b
+; extension introduce instructions suitable for this pattern.
+
+define i64 @slo_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: slo_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a3, a2, -32
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    bltz a3, .LBB1_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    sll a1, a0, a3
+; RV32I-NEXT:    j .LBB1_3
+; RV32I-NEXT:  .LBB1_2:
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    sll a1, a1, a2
+; RV32I-NEXT:    addi a3, zero, 31
+; RV32I-NEXT:    sub a3, a3, a2
+; RV32I-NEXT:    srli a4, a0, 1
+; RV32I-NEXT:    srl a3, a4, a3
+; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    sll a2, a0, a2
+; RV32I-NEXT:  .LBB1_3:
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    not a0, a2
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: slo_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    not a0, a0
+; RV32IB-NEXT:    not a1, a1
+; RV32IB-NEXT:    sll a1, a1, a2
+; RV32IB-NEXT:    addi a3, zero, 31
+; RV32IB-NEXT:    sub a3, a3, a2
+; RV32IB-NEXT:    srli a4, a0, 1
+; RV32IB-NEXT:    srl a3, a4, a3
+; RV32IB-NEXT:    or a1, a1, a3
+; RV32IB-NEXT:    addi a3, a2, -32
+; RV32IB-NEXT:    sll a4, a0, a3
+; RV32IB-NEXT:    slti a5, a3, 0
+; RV32IB-NEXT:    cmov a1, a5, a1, a4
+; RV32IB-NEXT:    sll a0, a0, a2
+; RV32IB-NEXT:    srai a2, a3, 31
+; RV32IB-NEXT:    and a0, a2, a0
+; RV32IB-NEXT:    not a1, a1
+; RV32IB-NEXT:    not a0, a0
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: slo_i64:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    addi a3, a2, -32
+; RV32IBP-NEXT:    not a0, a0
+; RV32IBP-NEXT:    bltz a3, .LBB1_2
+; RV32IBP-NEXT:  # %bb.1:
+; RV32IBP-NEXT:    mv a2, zero
+; RV32IBP-NEXT:    sll a1, a0, a3
+; RV32IBP-NEXT:    j .LBB1_3
+; RV32IBP-NEXT:  .LBB1_2:
+; RV32IBP-NEXT:    not a1, a1
+; RV32IBP-NEXT:    sll a1, a1, a2
+; RV32IBP-NEXT:    addi a3, zero, 31
+; RV32IBP-NEXT:    sub a3, a3, a2
+; RV32IBP-NEXT:    srli a4, a0, 1
+; RV32IBP-NEXT:    srl a3, a4, a3
+; RV32IBP-NEXT:    or a1, a1, a3
+; RV32IBP-NEXT:    sll a2, a0, a2
+; RV32IBP-NEXT:  .LBB1_3:
+; RV32IBP-NEXT:    not a1, a1
+; RV32IBP-NEXT:    not a0, a2
+; RV32IBP-NEXT:    ret
+  %neg = xor i64 %a, -1
+  %shl = shl i64 %neg, %b
+  %neg1 = xor i64 %shl, -1
+  ret i64 %neg1
+}
+
+define i32 @sro_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: sro_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    srl a0, a0, a1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sro_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    sro a0, a0, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: sro_i32:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    sro a0, a0, a1
+; RV32IBP-NEXT:    ret
+  %neg = xor i32 %a, -1
+  %shr = lshr i32 %neg, %b
+  %neg1 = xor i32 %shr, -1
+  ret i32 %neg1
+}
+
+; As we are not matching directly i64 code patterns on RV32 some i64 patterns
+; don't have yet any matching bit manipulation instructions on RV32.
+; This test is presented here in case future expansions of the experimental-b
+; extension introduce instructions suitable for this pattern.
+
+define i64 @sro_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: sro_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a3, a2, -32
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    bltz a3, .LBB3_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    srl a0, a1, a3
+; RV32I-NEXT:    j .LBB3_3
+; RV32I-NEXT:  .LBB3_2:
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    srl a0, a0, a2
+; RV32I-NEXT:    addi a3, zero, 31
+; RV32I-NEXT:    sub a3, a3, a2
+; RV32I-NEXT:    slli a4, a1, 1
+; RV32I-NEXT:    sll a3, a4, a3
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    srl a2, a1, a2
+; RV32I-NEXT:  .LBB3_3:
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    not a1, a2
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sro_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    not a1, a1
+; RV32IB-NEXT:    not a0, a0
+; RV32IB-NEXT:    srl a0, a0, a2
+; RV32IB-NEXT:    addi a3, zero, 31
+; RV32IB-NEXT:    sub a3, a3, a2
+; RV32IB-NEXT:    slli a4, a1, 1
+; RV32IB-NEXT:    sll a3, a4, a3
+; RV32IB-NEXT:    or a0, a0, a3
+; RV32IB-NEXT:    addi a3, a2, -32
+; RV32IB-NEXT:    srl a4, a1, a3
+; RV32IB-NEXT:    slti a5, a3, 0
+; RV32IB-NEXT:    cmov a0, a5, a0, a4
+; RV32IB-NEXT:    srl a1, a1, a2
+; RV32IB-NEXT:    srai a2, a3, 31
+; RV32IB-NEXT:    and a1, a2, a1
+; RV32IB-NEXT:    not a0, a0
+; RV32IB-NEXT:    not a1, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: sro_i64:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    addi a3, a2, -32
+; RV32IBP-NEXT:    not a1, a1
+; RV32IBP-NEXT:    bltz a3, .LBB3_2
+; RV32IBP-NEXT:  # %bb.1:
+; RV32IBP-NEXT:    mv a2, zero
+; RV32IBP-NEXT:    srl a0, a1, a3
+; RV32IBP-NEXT:    j .LBB3_3
+; RV32IBP-NEXT:  .LBB3_2:
+; RV32IBP-NEXT:    not a0, a0
+; RV32IBP-NEXT:    srl a0, a0, a2
+; RV32IBP-NEXT:    addi a3, zero, 31
+; RV32IBP-NEXT:    sub a3, a3, a2
+; RV32IBP-NEXT:    slli a4, a1, 1
+; RV32IBP-NEXT:    sll a3, a4, a3
+; RV32IBP-NEXT:    or a0, a0, a3
+; RV32IBP-NEXT:    srl a2, a1, a2
+; RV32IBP-NEXT:  .LBB3_3:
+; RV32IBP-NEXT:    not a0, a0
+; RV32IBP-NEXT:    not a1, a2
+; RV32IBP-NEXT:    ret
+  %neg = xor i64 %a, -1
+  %shr = lshr i64 %neg, %b
+  %neg1 = xor i64 %shr, -1
+  ret i64 %neg1
+}
+
+define i32 @sloi_i32(i32 %a) nounwind {
+; RV32I-LABEL: sloi_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    ori a0, a0, 1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sloi_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    sloi a0, a0, 1
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: sloi_i32:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    sloi a0, a0, 1
+; RV32IBP-NEXT:    ret
+  %neg = shl i32 %a, 1
+  %neg12 = or i32 %neg, 1
+  ret i32 %neg12
+}
+
+define i64 @sloi_i64(i64 %a) nounwind {
+; RV32I-LABEL: sloi_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a0, 31
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    ori a0, a0, 1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sloi_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    fsri a1, a0, a1, 31
+; RV32IB-NEXT:    sloi a0, a0, 1
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: sloi_i64:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    srli a2, a0, 31
+; RV32IBP-NEXT:    slli a1, a1, 1
+; RV32IBP-NEXT:    or a1, a1, a2
+; RV32IBP-NEXT:    sloi a0, a0, 1
+; RV32IBP-NEXT:    ret
+  %neg = shl i64 %a, 1
+  %neg12 = or i64 %neg, 1
+  ret i64 %neg12
+}
+
+define i32 @sroi_i32(i32 %a) nounwind {
+; RV32I-LABEL: sroi_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    lui a1, 524288
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sroi_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    sroi a0, a0, 1
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: sroi_i32:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    sroi a0, a0, 1
+; RV32IBP-NEXT:    ret
+  %neg = lshr i32 %a, 1
+  %neg12 = or i32 %neg, -2147483648
+  ret i32 %neg12
+}
+
+define i64 @sroi_i64(i64 %a) nounwind {
+; RV32I-LABEL: sroi_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a1, 31
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    lui a2, 524288
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sroi_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    fsri a0, a0, a1, 1
+; RV32IB-NEXT:    sroi a1, a1, 1
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: sroi_i64:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    slli a2, a1, 31
+; RV32IBP-NEXT:    srli a0, a0, 1
+; RV32IBP-NEXT:    or a0, a0, a2
+; RV32IBP-NEXT:    sroi a1, a1, 1
+; RV32IBP-NEXT:    ret
+  %neg = lshr i64 %a, 1
+  %neg12 = or i64 %neg, -9223372036854775808
+  ret i64 %neg12
+}
+
 define i32 @gorc1_i32(i32 %a) nounwind {
 ; RV32I-LABEL: gorc1_i32:
 ; RV32I:       # %bb.0:

diff  --git a/llvm/test/CodeGen/RISCV/rv64Zbb.ll b/llvm/test/CodeGen/RISCV/rv64Zbb.ll
index 0fdb001b18af..5c34f4116028 100644
--- a/llvm/test/CodeGen/RISCV/rv64Zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64Zbb.ll
@@ -6,216 +6,6 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbb -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64IBB
 
-define signext i32 @slo_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: slo_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    sllw a0, a0, a1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    ret
-;
-; RV64IB-LABEL: slo_i32:
-; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    slow a0, a0, a1
-; RV64IB-NEXT:    ret
-;
-; RV64IBB-LABEL: slo_i32:
-; RV64IBB:       # %bb.0:
-; RV64IBB-NEXT:    slow a0, a0, a1
-; RV64IBB-NEXT:    ret
-  %neg = xor i32 %a, -1
-  %shl = shl i32 %neg, %b
-  %neg1 = xor i32 %shl, -1
-  ret i32 %neg1
-}
-
-define i64 @slo_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: slo_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    sll a0, a0, a1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    ret
-;
-; RV64IB-LABEL: slo_i64:
-; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    slo a0, a0, a1
-; RV64IB-NEXT:    ret
-;
-; RV64IBB-LABEL: slo_i64:
-; RV64IBB:       # %bb.0:
-; RV64IBB-NEXT:    slo a0, a0, a1
-; RV64IBB-NEXT:    ret
-  %neg = xor i64 %a, -1
-  %shl = shl i64 %neg, %b
-  %neg1 = xor i64 %shl, -1
-  ret i64 %neg1
-}
-
-define signext i32 @sro_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: sro_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    srlw a0, a0, a1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    ret
-;
-; RV64IB-LABEL: sro_i32:
-; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    srow a0, a0, a1
-; RV64IB-NEXT:    ret
-;
-; RV64IBB-LABEL: sro_i32:
-; RV64IBB:       # %bb.0:
-; RV64IBB-NEXT:    srow a0, a0, a1
-; RV64IBB-NEXT:    ret
-  %neg = xor i32 %a, -1
-  %shr = lshr i32 %neg, %b
-  %neg1 = xor i32 %shr, -1
-  ret i32 %neg1
-}
-
-define i64 @sro_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: sro_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    srl a0, a0, a1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    ret
-;
-; RV64IB-LABEL: sro_i64:
-; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    sro a0, a0, a1
-; RV64IB-NEXT:    ret
-;
-; RV64IBB-LABEL: sro_i64:
-; RV64IBB:       # %bb.0:
-; RV64IBB-NEXT:    sro a0, a0, a1
-; RV64IBB-NEXT:    ret
-  %neg = xor i64 %a, -1
-  %shr = lshr i64 %neg, %b
-  %neg1 = xor i64 %shr, -1
-  ret i64 %neg1
-}
-
-define signext i32 @sloi_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: sloi_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    ori a0, a0, 1
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    ret
-;
-; RV64IB-LABEL: sloi_i32:
-; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    sloiw a0, a0, 1
-; RV64IB-NEXT:    ret
-;
-; RV64IBB-LABEL: sloi_i32:
-; RV64IBB:       # %bb.0:
-; RV64IBB-NEXT:    sloiw a0, a0, 1
-; RV64IBB-NEXT:    ret
-  %neg = shl i32 %a, 1
-  %neg12 = or i32 %neg, 1
-  ret i32 %neg12
-}
-
-define i64 @sloi_i64(i64 %a) nounwind {
-; RV64I-LABEL: sloi_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    ori a0, a0, 1
-; RV64I-NEXT:    ret
-;
-; RV64IB-LABEL: sloi_i64:
-; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    sloi a0, a0, 1
-; RV64IB-NEXT:    ret
-;
-; RV64IBB-LABEL: sloi_i64:
-; RV64IBB:       # %bb.0:
-; RV64IBB-NEXT:    sloi a0, a0, 1
-; RV64IBB-NEXT:    ret
-  %neg = shl i64 %a, 1
-  %neg12 = or i64 %neg, 1
-  ret i64 %neg12
-}
-
-define signext i32 @sroi_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: sroi_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64IB-LABEL: sroi_i32:
-; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    sroiw a0, a0, 1
-; RV64IB-NEXT:    ret
-;
-; RV64IBB-LABEL: sroi_i32:
-; RV64IBB:       # %bb.0:
-; RV64IBB-NEXT:    sroiw a0, a0, 1
-; RV64IBB-NEXT:    ret
-  %neg = lshr i32 %a, 1
-  %neg12 = or i32 %neg, -2147483648
-  ret i32 %neg12
-}
-
-; This is similar to the type legalized version of sroiw but the mask is 0 in
-; the upper bits instead of 1 so the result is not sign extended. Make sure we
-; don't match it to sroiw.
-define i64 @sroiw_bug(i64 %a) nounwind {
-; RV64I-LABEL: sroiw_bug:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    addi a1, zero, 1
-; RV64I-NEXT:    slli a1, a1, 31
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64IB-LABEL: sroiw_bug:
-; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    srli a0, a0, 1
-; RV64IB-NEXT:    sbseti a0, a0, 31
-; RV64IB-NEXT:    ret
-;
-; RV64IBB-LABEL: sroiw_bug:
-; RV64IBB:       # %bb.0:
-; RV64IBB-NEXT:    srli a0, a0, 1
-; RV64IBB-NEXT:    addi a1, zero, 1
-; RV64IBB-NEXT:    slli a1, a1, 31
-; RV64IBB-NEXT:    or a0, a0, a1
-; RV64IBB-NEXT:    ret
-  %neg = lshr i64 %a, 1
-  %neg12 = or i64 %neg, 2147483648
-  ret i64 %neg12
-}
-
-define i64 @sroi_i64(i64 %a) nounwind {
-; RV64I-LABEL: sroi_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    addi a1, zero, -1
-; RV64I-NEXT:    slli a1, a1, 63
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64IB-LABEL: sroi_i64:
-; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    sroi a0, a0, 1
-; RV64IB-NEXT:    ret
-;
-; RV64IBB-LABEL: sroi_i64:
-; RV64IBB:       # %bb.0:
-; RV64IBB-NEXT:    sroi a0, a0, 1
-; RV64IBB-NEXT:    ret
-  %neg = lshr i64 %a, 1
-  %neg12 = or i64 %neg, -9223372036854775808
-  ret i64 %neg12
-}
-
 declare i32 @llvm.ctlz.i32(i32, i1)
 
 define signext i32 @ctlz_i32(i32 signext %a) nounwind {
@@ -223,7 +13,7 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    beqz a0, .LBB9_2
+; RV64I-NEXT:    beqz a0, .LBB0_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    srliw a1, a0, 1
 ; RV64I-NEXT:    slli a0, a0, 32
@@ -283,10 +73,10 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    addi a0, a0, -32
-; RV64I-NEXT:    j .LBB9_3
-; RV64I-NEXT:  .LBB9_2:
+; RV64I-NEXT:    j .LBB0_3
+; RV64I-NEXT:  .LBB0_2:
 ; RV64I-NEXT:    addi a0, zero, 32
-; RV64I-NEXT:  .LBB9_3: # %cond.end
+; RV64I-NEXT:  .LBB0_3: # %cond.end
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -311,7 +101,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    beqz a0, .LBB10_2
+; RV64I-NEXT:    beqz a0, .LBB1_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    srli a1, a0, 1
 ; RV64I-NEXT:    or a0, a0, a1
@@ -368,10 +158,10 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addi a1, a1, 257
 ; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 56
-; RV64I-NEXT:    j .LBB10_3
-; RV64I-NEXT:  .LBB10_2:
+; RV64I-NEXT:    j .LBB1_3
+; RV64I-NEXT:  .LBB1_2:
 ; RV64I-NEXT:    addi a0, zero, 64
-; RV64I-NEXT:  .LBB10_3: # %cond.end
+; RV64I-NEXT:  .LBB1_3: # %cond.end
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -396,7 +186,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    beqz a0, .LBB11_2
+; RV64I-NEXT:    beqz a0, .LBB2_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi a1, a0, -1
 ; RV64I-NEXT:    not a0, a0
@@ -443,10 +233,10 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    addi a1, a1, 257
 ; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 56
-; RV64I-NEXT:    j .LBB11_3
-; RV64I-NEXT:  .LBB11_2:
+; RV64I-NEXT:    j .LBB2_3
+; RV64I-NEXT:  .LBB2_2:
 ; RV64I-NEXT:    addi a0, zero, 32
-; RV64I-NEXT:  .LBB11_3: # %cond.end
+; RV64I-NEXT:  .LBB2_3: # %cond.end
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -471,7 +261,7 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    beqz a0, .LBB12_2
+; RV64I-NEXT:    beqz a0, .LBB3_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi a1, a0, -1
 ; RV64I-NEXT:    not a0, a0
@@ -518,10 +308,10 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addi a1, a1, 257
 ; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 56
-; RV64I-NEXT:    j .LBB12_3
-; RV64I-NEXT:  .LBB12_2:
+; RV64I-NEXT:    j .LBB3_3
+; RV64I-NEXT:  .LBB3_2:
 ; RV64I-NEXT:    addi a0, zero, 64
-; RV64I-NEXT:  .LBB12_3: # %cond.end
+; RV64I-NEXT:  .LBB3_3: # %cond.end
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -754,10 +544,10 @@ define i64 @sexth_i64(i64 %a) nounwind {
 define signext i32 @min_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: min_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    blt a0, a1, .LBB19_2
+; RV64I-NEXT:    blt a0, a1, .LBB10_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:  .LBB19_2:
+; RV64I-NEXT:  .LBB10_2:
 ; RV64I-NEXT:    ret
 ;
 ; RV64IB-LABEL: min_i32:
@@ -777,10 +567,10 @@ define signext i32 @min_i32(i32 signext %a, i32 signext %b) nounwind {
 define i64 @min_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: min_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    blt a0, a1, .LBB20_2
+; RV64I-NEXT:    blt a0, a1, .LBB11_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:  .LBB20_2:
+; RV64I-NEXT:  .LBB11_2:
 ; RV64I-NEXT:    ret
 ;
 ; RV64IB-LABEL: min_i64:
@@ -800,10 +590,10 @@ define i64 @min_i64(i64 %a, i64 %b) nounwind {
 define signext i32 @max_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: max_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    blt a1, a0, .LBB21_2
+; RV64I-NEXT:    blt a1, a0, .LBB12_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:  .LBB21_2:
+; RV64I-NEXT:  .LBB12_2:
 ; RV64I-NEXT:    ret
 ;
 ; RV64IB-LABEL: max_i32:
@@ -823,10 +613,10 @@ define signext i32 @max_i32(i32 signext %a, i32 signext %b) nounwind {
 define i64 @max_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: max_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    blt a1, a0, .LBB22_2
+; RV64I-NEXT:    blt a1, a0, .LBB13_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:  .LBB22_2:
+; RV64I-NEXT:  .LBB13_2:
 ; RV64I-NEXT:    ret
 ;
 ; RV64IB-LABEL: max_i64:
@@ -846,10 +636,10 @@ define i64 @max_i64(i64 %a, i64 %b) nounwind {
 define signext i32 @minu_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: minu_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    bltu a0, a1, .LBB23_2
+; RV64I-NEXT:    bltu a0, a1, .LBB14_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:  .LBB23_2:
+; RV64I-NEXT:  .LBB14_2:
 ; RV64I-NEXT:    ret
 ;
 ; RV64IB-LABEL: minu_i32:
@@ -869,10 +659,10 @@ define signext i32 @minu_i32(i32 signext %a, i32 signext %b) nounwind {
 define i64 @minu_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: minu_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    bltu a0, a1, .LBB24_2
+; RV64I-NEXT:    bltu a0, a1, .LBB15_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:  .LBB24_2:
+; RV64I-NEXT:  .LBB15_2:
 ; RV64I-NEXT:    ret
 ;
 ; RV64IB-LABEL: minu_i64:
@@ -892,10 +682,10 @@ define i64 @minu_i64(i64 %a, i64 %b) nounwind {
 define signext i32 @maxu_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: maxu_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    bltu a1, a0, .LBB25_2
+; RV64I-NEXT:    bltu a1, a0, .LBB16_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:  .LBB25_2:
+; RV64I-NEXT:  .LBB16_2:
 ; RV64I-NEXT:    ret
 ;
 ; RV64IB-LABEL: maxu_i32:
@@ -915,10 +705,10 @@ define signext i32 @maxu_i32(i32 signext %a, i32 signext %b) nounwind {
 define i64 @maxu_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: maxu_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    bltu a1, a0, .LBB26_2
+; RV64I-NEXT:    bltu a1, a0, .LBB17_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:  .LBB26_2:
+; RV64I-NEXT:  .LBB17_2:
 ; RV64I-NEXT:    ret
 ;
 ; RV64IB-LABEL: maxu_i64:

diff  --git a/llvm/test/CodeGen/RISCV/rv64Zbp.ll b/llvm/test/CodeGen/RISCV/rv64Zbp.ll
index c040261dd990..e479c9285606 100644
--- a/llvm/test/CodeGen/RISCV/rv64Zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64Zbp.ll
@@ -6,6 +6,216 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbp -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64IBP
 
+define signext i32 @slo_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: slo_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: slo_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    slow a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: slo_i32:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    slow a0, a0, a1
+; RV64IBP-NEXT:    ret
+  %neg = xor i32 %a, -1
+  %shl = shl i32 %neg, %b
+  %neg1 = xor i32 %shl, -1
+  ret i32 %neg1
+}
+
+define i64 @slo_i64(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: slo_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: slo_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    slo a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: slo_i64:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    slo a0, a0, a1
+; RV64IBP-NEXT:    ret
+  %neg = xor i64 %a, -1
+  %shl = shl i64 %neg, %b
+  %neg1 = xor i64 %shl, -1
+  ret i64 %neg1
+}
+
+define signext i32 @sro_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sro_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sro_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    srow a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: sro_i32:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    srow a0, a0, a1
+; RV64IBP-NEXT:    ret
+  %neg = xor i32 %a, -1
+  %shr = lshr i32 %neg, %b
+  %neg1 = xor i32 %shr, -1
+  ret i32 %neg1
+}
+
+define i64 @sro_i64(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: sro_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sro_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sro a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: sro_i64:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    sro a0, a0, a1
+; RV64IBP-NEXT:    ret
+  %neg = xor i64 %a, -1
+  %shr = lshr i64 %neg, %b
+  %neg1 = xor i64 %shr, -1
+  ret i64 %neg1
+}
+
+define signext i32 @sloi_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: sloi_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    ori a0, a0, 1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sloi_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sloiw a0, a0, 1
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: sloi_i32:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    sloiw a0, a0, 1
+; RV64IBP-NEXT:    ret
+  %neg = shl i32 %a, 1
+  %neg12 = or i32 %neg, 1
+  ret i32 %neg12
+}
+
+define i64 @sloi_i64(i64 %a) nounwind {
+; RV64I-LABEL: sloi_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    ori a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sloi_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sloi a0, a0, 1
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: sloi_i64:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    sloi a0, a0, 1
+; RV64IBP-NEXT:    ret
+  %neg = shl i64 %a, 1
+  %neg12 = or i64 %neg, 1
+  ret i64 %neg12
+}
+
+define signext i32 @sroi_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: sroi_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    lui a1, 524288
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sroi_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sroiw a0, a0, 1
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: sroi_i32:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    sroiw a0, a0, 1
+; RV64IBP-NEXT:    ret
+  %neg = lshr i32 %a, 1
+  %neg12 = or i32 %neg, -2147483648
+  ret i32 %neg12
+}
+
+; This is similar to the type legalized version of sroiw but the mask is 0 in
+; the upper bits instead of 1 so the result is not sign extended. Make sure we
+; don't match it to sroiw.
+define i64 @sroiw_bug(i64 %a) nounwind {
+; RV64I-LABEL: sroiw_bug:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    addi a1, zero, 1
+; RV64I-NEXT:    slli a1, a1, 31
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sroiw_bug:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    srli a0, a0, 1
+; RV64IB-NEXT:    sbseti a0, a0, 31
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: sroiw_bug:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    srli a0, a0, 1
+; RV64IBP-NEXT:    addi a1, zero, 1
+; RV64IBP-NEXT:    slli a1, a1, 31
+; RV64IBP-NEXT:    or a0, a0, a1
+; RV64IBP-NEXT:    ret
+  %neg = lshr i64 %a, 1
+  %neg12 = or i64 %neg, 2147483648
+  ret i64 %neg12
+}
+
+define i64 @sroi_i64(i64 %a) nounwind {
+; RV64I-LABEL: sroi_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    slli a1, a1, 63
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sroi_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sroi a0, a0, 1
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: sroi_i64:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    sroi a0, a0, 1
+; RV64IBP-NEXT:    ret
+  %neg = lshr i64 %a, 1
+  %neg12 = or i64 %neg, -9223372036854775808
+  ret i64 %neg12
+}
+
 define signext i32 @gorc1_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: gorc1_i32:
 ; RV64I:       # %bb.0:
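
The RV64 *w forms exercised above (slow, srow, sloiw, sroiw) operate
on the low 32 bits and sign-extend the 32-bit result, by analogy with
sllw/srlw; a rough C model (illustrative, not from the commit):

  #include <stdint.h>

  /* slow: shift-left-ones on the low 32 bits of rs1, then sign-extend
     the 32-bit result to 64 bits, mirroring sllw/srlw. */
  static inline int64_t slow64(uint64_t rs1, uint64_t rs2) {
      uint32_t w = ~(~(uint32_t)rs1 << (rs2 & 31));
      return (int64_t)(int32_t)w;
  }

That sign extension is also why the sroiw_bug test above must not
select sroiw: its OR constant leaves the upper 32 bits clear rather
than sign-extending them.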

diff  --git a/llvm/test/MC/RISCV/rv32zbb-invalid.s b/llvm/test/MC/RISCV/rv32zbb-invalid.s
index e276bb1c2778..4e40b44ee02c 100644
--- a/llvm/test/MC/RISCV/rv32zbb-invalid.s
+++ b/llvm/test/MC/RISCV/rv32zbb-invalid.s
@@ -1,19 +1,5 @@
 # RUN: not llvm-mc -triple riscv32 -mattr=+experimental-b,experimental-zbb < %s 2>&1 | FileCheck %s
 
-# Too few operands
-slo t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-sro t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-sloi t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Immediate operand out of range
-sloi t0, t1, 32 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31]
-sloi t0, t1, -1 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31]
-# Too few operands
-sroi t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Immediate operand out of range
-sroi t0, t1, 32 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31]
-sroi t0, t1, -1 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31]
 # Too many operands
 clz t0, t1, t2 # CHECK: :[[@LINE]]:13: error: invalid operand for instruction
 # Too many operands

diff --git a/llvm/test/MC/RISCV/rv32zbb-valid.s b/llvm/test/MC/RISCV/rv32zbb-valid.s
index 13cf4a1bf9bd..dd8a4a408a8b 100644
--- a/llvm/test/MC/RISCV/rv32zbb-valid.s
+++ b/llvm/test/MC/RISCV/rv32zbb-valid.s
@@ -12,18 +12,6 @@
 # RUN:     | llvm-objdump --mattr=+experimental-zbb -d -r - \
 # RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
 
-# CHECK-ASM-AND-OBJ: slo t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x12,0x73,0x20]
-slo t0, t1, t2
-# CHECK-ASM-AND-OBJ: sro t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x52,0x73,0x20]
-sro t0, t1, t2
-# CHECK-ASM-AND-OBJ: sloi t0, t1, 0
-# CHECK-ASM: encoding: [0x93,0x12,0x03,0x20]
-sloi t0, t1, 0
-# CHECK-ASM-AND-OBJ: sroi t0, t1, 0
-# CHECK-ASM: encoding: [0x93,0x52,0x03,0x20]
-sroi t0, t1, 0
 # CHECK-ASM-AND-OBJ: clz t0, t1
 # CHECK-ASM: encoding: [0x93,0x12,0x03,0x60]
 clz t0, t1

diff --git a/llvm/test/MC/RISCV/rv32zbp-invalid.s b/llvm/test/MC/RISCV/rv32zbp-invalid.s
index da804b440550..df72369dbeda 100644
--- a/llvm/test/MC/RISCV/rv32zbp-invalid.s
+++ b/llvm/test/MC/RISCV/rv32zbp-invalid.s
@@ -1,5 +1,19 @@
 # RUN: not llvm-mc -triple riscv32 -mattr=+experimental-b,experimental-zbp < %s 2>&1 | FileCheck %s
 
+# Too few operands
+slo t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Too few operands
+sro t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Too few operands
+sloi t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Immediate operand out of range
+sloi t0, t1, 32 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31]
+sloi t0, t1, -1 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31]
+# Too few operands
+sroi t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Immediate operand out of range
+sroi t0, t1, 32 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31]
+sroi t0, t1, -1 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31]
 # Too few operands
 gorc t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
 # Too few operands

diff --git a/llvm/test/MC/RISCV/rv32zbp-valid.s b/llvm/test/MC/RISCV/rv32zbp-valid.s
index 6fe79743cd58..698af314d024 100644
--- a/llvm/test/MC/RISCV/rv32zbp-valid.s
+++ b/llvm/test/MC/RISCV/rv32zbp-valid.s
@@ -12,6 +12,18 @@
 # RUN:     | llvm-objdump --mattr=+experimental-zbp -d -r - \
 # RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
 
+# CHECK-ASM-AND-OBJ: slo t0, t1, t2
+# CHECK-ASM: encoding: [0xb3,0x12,0x73,0x20]
+slo t0, t1, t2
+# CHECK-ASM-AND-OBJ: sro t0, t1, t2
+# CHECK-ASM: encoding: [0xb3,0x52,0x73,0x20]
+sro t0, t1, t2
+# CHECK-ASM-AND-OBJ: sloi t0, t1, 0
+# CHECK-ASM: encoding: [0x93,0x12,0x03,0x20]
+sloi t0, t1, 0
+# CHECK-ASM-AND-OBJ: sroi t0, t1, 0
+# CHECK-ASM: encoding: [0x93,0x52,0x03,0x20]
+sroi t0, t1, 0
 # CHECK-ASM-AND-OBJ: gorc t0, t1, t2
 # CHECK-ASM: encoding: [0xb3,0x52,0x73,0x28]
 gorc t0, t1, t2

diff --git a/llvm/test/MC/RISCV/rv64zbb-invalid.s b/llvm/test/MC/RISCV/rv64zbb-invalid.s
index 7abb6da36e68..fb03fd1af9d8 100644
--- a/llvm/test/MC/RISCV/rv64zbb-invalid.s
+++ b/llvm/test/MC/RISCV/rv64zbb-invalid.s
@@ -1,19 +1,5 @@
 # RUN: not llvm-mc -triple riscv64 -mattr=+experimental-b,experimental-zbb < %s 2>&1 | FileCheck %s
 
-# Too few operands
-slow t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-srow t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-sloiw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Immediate operand out of range
-sloiw t0, t1, 32 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
-sloiw t0, t1, -1 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
-# Too few operands
-sroiw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Immediate operand out of range
-sroiw t0, t1, 32 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
-sroiw t0, t1, -1 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
 # Too many operands
 clzw t0, t1, t2 # CHECK: :[[@LINE]]:14: error: invalid operand for instruction
 # Too many operands

diff --git a/llvm/test/MC/RISCV/rv64zbb-valid.s b/llvm/test/MC/RISCV/rv64zbb-valid.s
index 437ac75e280e..6d43895a96cd 100644
--- a/llvm/test/MC/RISCV/rv64zbb-valid.s
+++ b/llvm/test/MC/RISCV/rv64zbb-valid.s
@@ -12,18 +12,6 @@
 # RUN:     | llvm-objdump --mattr=+experimental-zbb -d -r - \
 # RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
 
-# CHECK-ASM-AND-OBJ: slow t0, t1, t2
-# CHECK-ASM: encoding: [0xbb,0x12,0x73,0x20]
-slow t0, t1, t2
-# CHECK-ASM-AND-OBJ: srow t0, t1, t2
-# CHECK-ASM: encoding: [0xbb,0x52,0x73,0x20]
-srow t0, t1, t2
-# CHECK-ASM-AND-OBJ: sloiw t0, t1, 0
-# CHECK-ASM: encoding: [0x9b,0x12,0x03,0x20]
-sloiw t0, t1, 0
-# CHECK-ASM-AND-OBJ: sroiw t0, t1, 0
-# CHECK-ASM: encoding: [0x9b,0x52,0x03,0x20]
-sroiw t0, t1, 0
 # CHECK-ASM-AND-OBJ: clzw t0, t1
 # CHECK-ASM: encoding: [0x9b,0x12,0x03,0x60]
 clzw t0, t1

diff --git a/llvm/test/MC/RISCV/rv64zbp-invalid.s b/llvm/test/MC/RISCV/rv64zbp-invalid.s
index c1ccd7d2480c..8b98bdfa41b0 100644
--- a/llvm/test/MC/RISCV/rv64zbp-invalid.s
+++ b/llvm/test/MC/RISCV/rv64zbp-invalid.s
@@ -1,5 +1,19 @@
 # RUN: not llvm-mc -triple riscv64 -mattr=+experimental-b,experimental-zbp < %s 2>&1 | FileCheck %s
 
+# Too few operands
+slow t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Too few operands
+srow t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Too few operands
+sloiw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Immediate operand out of range
+sloiw t0, t1, 32 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
+sloiw t0, t1, -1 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
+# Too few operands
+sroiw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Immediate operand out of range
+sroiw t0, t1, 32 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
+sroiw t0, t1, -1 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
 # Too few operands
 gorcw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
 # Too few operands

diff --git a/llvm/test/MC/RISCV/rv64zbp-valid.s b/llvm/test/MC/RISCV/rv64zbp-valid.s
index 79c99c5cf723..6ec199a42dca 100644
--- a/llvm/test/MC/RISCV/rv64zbp-valid.s
+++ b/llvm/test/MC/RISCV/rv64zbp-valid.s
@@ -12,6 +12,18 @@
 # RUN:     | llvm-objdump --mattr=+experimental-zbp -d -r - \
 # RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
 
+# CHECK-ASM-AND-OBJ: slow t0, t1, t2
+# CHECK-ASM: encoding: [0xbb,0x12,0x73,0x20]
+slow t0, t1, t2
+# CHECK-ASM-AND-OBJ: srow t0, t1, t2
+# CHECK-ASM: encoding: [0xbb,0x52,0x73,0x20]
+srow t0, t1, t2
+# CHECK-ASM-AND-OBJ: sloiw t0, t1, 0
+# CHECK-ASM: encoding: [0x9b,0x12,0x03,0x20]
+sloiw t0, t1, 0
+# CHECK-ASM-AND-OBJ: sroiw t0, t1, 0
+# CHECK-ASM: encoding: [0x9b,0x52,0x03,0x20]
+sroiw t0, t1, 0
 # CHECK-ASM-AND-OBJ: gorcw t0, t1, t2
 # CHECK-ASM: encoding: [0xbb,0x52,0x73,0x28]
 gorcw t0, t1, t2
