[llvm] b94763d - [RISCV] Add test cases showing missed opportunities to use sbset/sbclr/sbinv/sbext when the shift amount isn't masked to log2(xlen) bits. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat Nov 7 14:25:33 PST 2020


Author: Craig Topper
Date: 2020-11-07T14:25:16-08:00
New Revision: b94763d52403dd6f9f603d9f09a012174e0ff7c1

URL: https://github.com/llvm/llvm-project/commit/b94763d52403dd6f9f603d9f09a012174e0ff7c1
DIFF: https://github.com/llvm/llvm-project/commit/b94763d52403dd6f9f603d9f09a012174e0ff7c1.diff

LOG: [RISCV] Add test cases showing missed opportunities to use sbset/sbclr/sbinv/sbext when the shift amount isn't masked to log2(xlen) bits. NFC

Out of bounds shifts are undefined and these instructions mask
their shift amount before use. So we don't need to see a mask
in order to select the instructions.

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rv32Zbs.ll
    llvm/test/CodeGen/RISCV/rv64Zbs.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rv32Zbs.ll b/llvm/test/CodeGen/RISCV/rv32Zbs.ll
index 16da34e49c66..dbbf06f6e586 100644
--- a/llvm/test/CodeGen/RISCV/rv32Zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv32Zbs.ll
@@ -31,21 +31,48 @@ define i32 @sbclr_i32(i32 %a, i32 %b) nounwind {
   ret i32 %and1
 }
 
+define i32 @sbclr_i32_no_mask(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: sbclr_i32_no_mask:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a2, zero, 1
+; RV32I-NEXT:    sll a1, a2, a1
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sbclr_i32_no_mask:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    sbclr a0, a0, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBS-LABEL: sbclr_i32_no_mask:
+; RV32IBS:       # %bb.0:
+; RV32IBS-NEXT:    addi a2, zero, 1
+; RV32IBS-NEXT:    sll a1, a2, a1
+; RV32IBS-NEXT:    not a1, a1
+; RV32IBS-NEXT:    and a0, a1, a0
+; RV32IBS-NEXT:    ret
+  %shl = shl nuw i32 1, %b
+  %neg = xor i32 %shl, -1
+  %and1 = and i32 %neg, %a
+  ret i32 %and1
+}
+
 define i64 @sbclr_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: sbclr_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a3, a2, 63
 ; RV32I-NEXT:    addi a4, a3, -32
 ; RV32I-NEXT:    addi a3, zero, 1
-; RV32I-NEXT:    bltz a4, .LBB1_2
+; RV32I-NEXT:    bltz a4, .LBB2_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a2, zero
 ; RV32I-NEXT:    sll a4, a3, a4
-; RV32I-NEXT:    j .LBB1_3
-; RV32I-NEXT:  .LBB1_2:
+; RV32I-NEXT:    j .LBB2_3
+; RV32I-NEXT:  .LBB2_2:
 ; RV32I-NEXT:    mv a4, zero
 ; RV32I-NEXT:    sll a2, a3, a2
-; RV32I-NEXT:  .LBB1_3:
+; RV32I-NEXT:  .LBB2_3:
 ; RV32I-NEXT:    not a3, a4
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    and a0, a2, a0
@@ -57,15 +84,15 @@ define i64 @sbclr_i64(i64 %a, i64 %b) nounwind {
 ; RV32IB-NEXT:    andi a3, a2, 63
 ; RV32IB-NEXT:    addi a4, a3, -32
 ; RV32IB-NEXT:    addi a3, zero, 1
-; RV32IB-NEXT:    bltz a4, .LBB1_2
+; RV32IB-NEXT:    bltz a4, .LBB2_2
 ; RV32IB-NEXT:  # %bb.1:
 ; RV32IB-NEXT:    mv a2, zero
 ; RV32IB-NEXT:    sll a4, a3, a4
-; RV32IB-NEXT:    j .LBB1_3
-; RV32IB-NEXT:  .LBB1_2:
+; RV32IB-NEXT:    j .LBB2_3
+; RV32IB-NEXT:  .LBB2_2:
 ; RV32IB-NEXT:    mv a4, zero
 ; RV32IB-NEXT:    sll a2, a3, a2
-; RV32IB-NEXT:  .LBB1_3:
+; RV32IB-NEXT:  .LBB2_3:
 ; RV32IB-NEXT:    andn a0, a0, a2
 ; RV32IB-NEXT:    andn a1, a1, a4
 ; RV32IB-NEXT:    ret
@@ -75,15 +102,15 @@ define i64 @sbclr_i64(i64 %a, i64 %b) nounwind {
 ; RV32IBS-NEXT:    andi a3, a2, 63
 ; RV32IBS-NEXT:    addi a4, a3, -32
 ; RV32IBS-NEXT:    addi a3, zero, 1
-; RV32IBS-NEXT:    bltz a4, .LBB1_2
+; RV32IBS-NEXT:    bltz a4, .LBB2_2
 ; RV32IBS-NEXT:  # %bb.1:
 ; RV32IBS-NEXT:    mv a2, zero
 ; RV32IBS-NEXT:    sll a4, a3, a4
-; RV32IBS-NEXT:    j .LBB1_3
-; RV32IBS-NEXT:  .LBB1_2:
+; RV32IBS-NEXT:    j .LBB2_3
+; RV32IBS-NEXT:  .LBB2_2:
 ; RV32IBS-NEXT:    mv a4, zero
 ; RV32IBS-NEXT:    sll a2, a3, a2
-; RV32IBS-NEXT:  .LBB1_3:
+; RV32IBS-NEXT:  .LBB2_3:
 ; RV32IBS-NEXT:    not a3, a4
 ; RV32IBS-NEXT:    not a2, a2
 ; RV32IBS-NEXT:    and a0, a2, a0
@@ -119,6 +146,32 @@ define i32 @sbset_i32(i32 %a, i32 %b) nounwind {
   ret i32 %or
 }
 
+define i32 @sbset_i32_no_mask(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: sbset_i32_no_mask:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a2, zero, 1
+; RV32I-NEXT:    sll a1, a2, a1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sbset_i32_no_mask:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    addi a2, zero, 1
+; RV32IB-NEXT:    sll a1, a2, a1
+; RV32IB-NEXT:    or a0, a1, a0
+; RV32IB-NEXT:    ret
+;
+; RV32IBS-LABEL: sbset_i32_no_mask:
+; RV32IBS:       # %bb.0:
+; RV32IBS-NEXT:    addi a2, zero, 1
+; RV32IBS-NEXT:    sll a1, a2, a1
+; RV32IBS-NEXT:    or a0, a1, a0
+; RV32IBS-NEXT:    ret
+  %shl = shl nuw i32 1, %b
+  %or = or i32 %shl, %a
+  ret i32 %or
+}
+
 ; As we are not matching directly i64 code patterns on RV32 some i64 patterns
 ; don't have yet any matching bit manipulation instructions on RV32.
 ; This test is presented here in case future expansions of the experimental-b
@@ -244,6 +297,29 @@ define i32 @sbext_i32(i32 %a, i32 %b) nounwind {
   ret i32 %and1
 }
 
+define i32 @sbext_i32_no_mask(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: sbext_i32_no_mask:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srl a0, a0, a1
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sbext_i32_no_mask:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    srl a0, a0, a1
+; RV32IB-NEXT:    andi a0, a0, 1
+; RV32IB-NEXT:    ret
+;
+; RV32IBS-LABEL: sbext_i32_no_mask:
+; RV32IBS:       # %bb.0:
+; RV32IBS-NEXT:    srl a0, a0, a1
+; RV32IBS-NEXT:    andi a0, a0, 1
+; RV32IBS-NEXT:    ret
+  %shr = lshr i32 %a, %b
+  %and1 = and i32 %shr, 1
+  ret i32 %and1
+}
+
 ; As we are not matching directly i64 code patterns on RV32 some i64 patterns
 ; don't have yet any matching bit manipulation instructions on RV32.
 ; This test is presented here in case future expansions of the experimental-b
@@ -254,18 +330,18 @@ define i64 @sbext_i64(i64 %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a3, a2, 63
 ; RV32I-NEXT:    addi a4, a3, -32
-; RV32I-NEXT:    bltz a4, .LBB7_2
+; RV32I-NEXT:    bltz a4, .LBB10_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srl a0, a1, a4
-; RV32I-NEXT:    j .LBB7_3
-; RV32I-NEXT:  .LBB7_2:
+; RV32I-NEXT:    j .LBB10_3
+; RV32I-NEXT:  .LBB10_2:
 ; RV32I-NEXT:    srl a0, a0, a2
 ; RV32I-NEXT:    addi a2, zero, 31
 ; RV32I-NEXT:    sub a2, a2, a3
 ; RV32I-NEXT:    slli a1, a1, 1
 ; RV32I-NEXT:    sll a1, a1, a2
 ; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:  .LBB7_3:
+; RV32I-NEXT:  .LBB10_3:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    mv a1, zero
 ; RV32I-NEXT:    ret
@@ -274,18 +350,18 @@ define i64 @sbext_i64(i64 %a, i64 %b) nounwind {
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    andi a3, a2, 63
 ; RV32IB-NEXT:    addi a4, a3, -32
-; RV32IB-NEXT:    bltz a4, .LBB7_2
+; RV32IB-NEXT:    bltz a4, .LBB10_2
 ; RV32IB-NEXT:  # %bb.1:
 ; RV32IB-NEXT:    srl a0, a1, a4
-; RV32IB-NEXT:    j .LBB7_3
-; RV32IB-NEXT:  .LBB7_2:
+; RV32IB-NEXT:    j .LBB10_3
+; RV32IB-NEXT:  .LBB10_2:
 ; RV32IB-NEXT:    srl a0, a0, a2
 ; RV32IB-NEXT:    addi a2, zero, 31
 ; RV32IB-NEXT:    sub a2, a2, a3
 ; RV32IB-NEXT:    slli a1, a1, 1
 ; RV32IB-NEXT:    sll a1, a1, a2
 ; RV32IB-NEXT:    or a0, a0, a1
-; RV32IB-NEXT:  .LBB7_3:
+; RV32IB-NEXT:  .LBB10_3:
 ; RV32IB-NEXT:    andi a0, a0, 1
 ; RV32IB-NEXT:    mv a1, zero
 ; RV32IB-NEXT:    ret
@@ -294,18 +370,18 @@ define i64 @sbext_i64(i64 %a, i64 %b) nounwind {
 ; RV32IBS:       # %bb.0:
 ; RV32IBS-NEXT:    andi a3, a2, 63
 ; RV32IBS-NEXT:    addi a4, a3, -32
-; RV32IBS-NEXT:    bltz a4, .LBB7_2
+; RV32IBS-NEXT:    bltz a4, .LBB10_2
 ; RV32IBS-NEXT:  # %bb.1:
 ; RV32IBS-NEXT:    srl a0, a1, a4
-; RV32IBS-NEXT:    j .LBB7_3
-; RV32IBS-NEXT:  .LBB7_2:
+; RV32IBS-NEXT:    j .LBB10_3
+; RV32IBS-NEXT:  .LBB10_2:
 ; RV32IBS-NEXT:    srl a0, a0, a2
 ; RV32IBS-NEXT:    addi a2, zero, 31
 ; RV32IBS-NEXT:    sub a2, a2, a3
 ; RV32IBS-NEXT:    slli a1, a1, 1
 ; RV32IBS-NEXT:    sll a1, a1, a2
 ; RV32IBS-NEXT:    or a0, a0, a1
-; RV32IBS-NEXT:  .LBB7_3:
+; RV32IBS-NEXT:  .LBB10_3:
 ; RV32IBS-NEXT:    andi a0, a0, 1
 ; RV32IBS-NEXT:    mv a1, zero
 ; RV32IBS-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rv64Zbs.ll b/llvm/test/CodeGen/RISCV/rv64Zbs.ll
index f7990b36dec8..9fbbbf381b4a 100644
--- a/llvm/test/CodeGen/RISCV/rv64Zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64Zbs.ll
@@ -31,6 +31,30 @@ define signext i32 @sbclr_i32(i32 signext %a, i32 signext %b) nounwind {
   ret i32 %and1
 }
 
+define signext i32 @sbclr_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sbclr_i32_no_mask:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    sllw a1, a2, a1
+; RV64I-NEXT:    not a1, a1
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbclr_i32_no_mask:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbclrw a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbclr_i32_no_mask:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    sbclrw a0, a0, a1
+; RV64IBS-NEXT:    ret
+  %shl = shl i32 1, %b
+  %neg = xor i32 %shl, -1
+  %and1 = and i32 %neg, %a
+  ret i32 %and1
+}
+
 define i64 @sbclr_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: sbclr_i64:
 ; RV64I:       # %bb.0:
@@ -56,6 +80,33 @@ define i64 @sbclr_i64(i64 %a, i64 %b) nounwind {
   ret i64 %and1
 }
 
+define i64 @sbclr_i64_no_mask(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: sbclr_i64_no_mask:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    sll a1, a2, a1
+; RV64I-NEXT:    not a1, a1
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbclr_i64_no_mask:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbclr a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbclr_i64_no_mask:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    addi a2, zero, 1
+; RV64IBS-NEXT:    sll a1, a2, a1
+; RV64IBS-NEXT:    not a1, a1
+; RV64IBS-NEXT:    and a0, a1, a0
+; RV64IBS-NEXT:    ret
+  %shl = shl i64 1, %b
+  %neg = xor i64 %shl, -1
+  %and1 = and i64 %neg, %a
+  ret i64 %and1
+}
+
 define signext i32 @sbset_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sbset_i32:
 ; RV64I:       # %bb.0:
@@ -79,6 +130,28 @@ define signext i32 @sbset_i32(i32 signext %a, i32 signext %b) nounwind {
   ret i32 %or
 }
 
+define signext i32 @sbset_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sbset_i32_no_mask:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    sllw a1, a2, a1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbset_i32_no_mask:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbsetw a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbset_i32_no_mask:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    sbsetw a0, a0, a1
+; RV64IBS-NEXT:    ret
+  %shl = shl i32 1, %b
+  %or = or i32 %shl, %a
+  ret i32 %or
+}
+
 define i64 @sbset_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: sbset_i64:
 ; RV64I:       # %bb.0:
@@ -102,6 +175,32 @@ define i64 @sbset_i64(i64 %a, i64 %b) nounwind {
   ret i64 %or
 }
 
+define i64 @sbset_i64_no_mask(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: sbset_i64_no_mask:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    sll a1, a2, a1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbset_i64_no_mask:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a2, zero, 1
+; RV64IB-NEXT:    sll a1, a2, a1
+; RV64IB-NEXT:    or a0, a1, a0
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbset_i64_no_mask:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    addi a2, zero, 1
+; RV64IBS-NEXT:    sll a1, a2, a1
+; RV64IBS-NEXT:    or a0, a1, a0
+; RV64IBS-NEXT:    ret
+  %shl = shl i64 1, %b
+  %or = or i64 %shl, %a
+  ret i64 %or
+}
+
 define signext i32 @sbinv_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sbinv_i32:
 ; RV64I:       # %bb.0:
@@ -125,6 +224,28 @@ define signext i32 @sbinv_i32(i32 signext %a, i32 signext %b) nounwind {
   ret i32 %xor
 }
 
+define signext i32 @sbinv_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sbinv_i32_no_mask:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    sllw a1, a2, a1
+; RV64I-NEXT:    xor a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbinv_i32_no_mask:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbinvw a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbinv_i32_no_mask:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    sbinvw a0, a0, a1
+; RV64IBS-NEXT:    ret
+  %shl = shl i32 1, %b
+  %xor = xor i32 %shl, %a
+  ret i32 %xor
+}
+
 define i64 @sbinv_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: sbinv_i64:
 ; RV64I:       # %bb.0:
@@ -148,6 +269,32 @@ define i64 @sbinv_i64(i64 %a, i64 %b) nounwind {
   ret i64 %xor
 }
 
+define i64 @sbinv_i64_no_mask(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: sbinv_i64_no_mask:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    sll a1, a2, a1
+; RV64I-NEXT:    xor a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbinv_i64_no_mask:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a2, zero, 1
+; RV64IB-NEXT:    sll a1, a2, a1
+; RV64IB-NEXT:    xor a0, a1, a0
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbinv_i64_no_mask:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    addi a2, zero, 1
+; RV64IBS-NEXT:    sll a1, a2, a1
+; RV64IBS-NEXT:    xor a0, a1, a0
+; RV64IBS-NEXT:    ret
+  %shl = shl nuw i64 1, %b
+  %xor = xor i64 %shl, %a
+  ret i64 %xor
+}
+
 define signext i32 @sbext_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sbext_i32:
 ; RV64I:       # %bb.0:
@@ -170,6 +317,27 @@ define signext i32 @sbext_i32(i32 signext %a, i32 signext %b) nounwind {
   ret i32 %and1
 }
 
+define signext i32 @sbext_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sbext_i32_no_mask:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbext_i32_no_mask:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbextw a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbext_i32_no_mask:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    sbextw a0, a0, a1
+; RV64IBS-NEXT:    ret
+  %shr = lshr i32 %a, %b
+  %and1 = and i32 %shr, 1
+  ret i32 %and1
+}
+
 define i64 @sbext_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: sbext_i64:
 ; RV64I:       # %bb.0:
@@ -192,6 +360,29 @@ define i64 @sbext_i64(i64 %a, i64 %b) nounwind {
   ret i64 %and1
 }
 
+define i64 @sbext_i64_no_mask(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: sbext_i64_no_mask:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbext_i64_no_mask:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    srl a0, a0, a1
+; RV64IB-NEXT:    andi a0, a0, 1
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbext_i64_no_mask:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    srl a0, a0, a1
+; RV64IBS-NEXT:    andi a0, a0, 1
+; RV64IBS-NEXT:    ret
+  %shr = lshr i64 %a, %b
+  %and1 = and i64 %shr, 1
+  ret i64 %and1
+}
+
 define signext i32 @sbexti_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: sbexti_i32:
 ; RV64I:       # %bb.0:


        


More information about the llvm-commits mailing list