[llvm] d4be333 - [RISCV] Add matching of codegen patterns to RISCV Bit Manipulation Zbs asm instructions

via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 15 04:20:37 PDT 2020


Author: lewis-revill
Date: 2020-07-15T12:19:34+01:00
New Revision: d4be33374c07ea9a9362892876aa76b227298181

URL: https://github.com/llvm/llvm-project/commit/d4be33374c07ea9a9362892876aa76b227298181
DIFF: https://github.com/llvm/llvm-project/commit/d4be33374c07ea9a9362892876aa76b227298181.diff

LOG: [RISCV] Add matching of codegen patterns to RISCV Bit Manipulation Zbs asm instructions

This patch enables optimization of bit manipulation operations when the
+experimental-b target feature is enabled.
It adds matching of single-block instruction patterns to specific
bit-manipulation instructions from the single-bit subset (Zbs subextension)
of the experimental B extension of RISC-V.
It also adds the corresponding codegen tests.

This patch is based on Claire Wolf's proposal for the bit manipulation
extension of RISC-V:
https://github.com/riscv/riscv-bitmanip/blob/master/bitmanip-0.92.pdf

Differential Revision: https://reviews.llvm.org/D79874
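
For illustration only (this snippet is not part of the commit), the kind of
single-block IR pattern the new rules match looks like the following, mirroring
the added tests. Without the B patterns, the riscv32 backend expands this to an
addi/sll/not/and sequence; with -mattr=+experimental-zbs (or +experimental-b)
it should lower to a single "sbclr a0, a0, a1":

  ; Clear bit (b & 31) of a, i.e. a & ~(1 << (b & 31)).
  define i32 @bit_clear(i32 %a, i32 %b) nounwind {
    %masked = and i32 %b, 31
    %bit = shl nuw i32 1, %masked
    %mask = xor i32 %bit, -1
    %res = and i32 %mask, %a
    ret i32 %res
  }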

Added: 
    llvm/test/CodeGen/RISCV/rv32Zbs.ll
    llvm/test/CodeGen/RISCV/rv64Zbs.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoB.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
index 45eb41f93b2e..aa1ed7ff79cd 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -664,6 +664,38 @@ def : Pat<(rotr GPR:$rs1, GPR:$rs2), (ROR GPR:$rs1, GPR:$rs2)>;
 def : Pat<(fshr GPR:$rs1, GPR:$rs1, GPR:$rs2), (ROR GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbbOrZbp]
 
+let Predicates = [HasStdExtZbs, IsRV32] in
+def : Pat<(and (xor (shl 1, (and GPR:$rs2, 31)), -1), GPR:$rs1),
+          (SBCLR GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbs, IsRV64] in
+def : Pat<(and (xor (shl 1, (and GPR:$rs2, 63)), -1), GPR:$rs1),
+          (SBCLR GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbs] in
+def : Pat<(and (rotl -2, GPR:$rs2), GPR:$rs1), (SBCLR GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbs, IsRV32] in
+def : Pat<(or (shl 1, (and GPR:$rs2, 31)), GPR:$rs1),
+          (SBSET GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbs, IsRV64] in
+def : Pat<(or (shl 1, (and GPR:$rs2, 63)), GPR:$rs1),
+          (SBSET GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbs, IsRV32] in
+def : Pat<(xor (shl 1, (and GPR:$rs2, 31)), GPR:$rs1),
+          (SBINV GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbs, IsRV64] in
+def : Pat<(xor (shl 1, (and GPR:$rs2, 63)), GPR:$rs1),
+          (SBINV GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbs, IsRV32] in
+def : Pat<(and (srl GPR:$rs1, (and GPR:$rs2, 31)), 1),
+          (SBEXT GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbs, IsRV64] in
+def : Pat<(and (srl GPR:$rs1, (and GPR:$rs2, 63)), 1),
+          (SBEXT GPR:$rs1, GPR:$rs2)>;
+
 let Predicates = [HasStdExtZbb] in {
 def : Pat<(SLOIPat GPR:$rs1, uimmlog2xlen:$shamt),
           (SLOI GPR:$rs1, uimmlog2xlen:$shamt)>;
@@ -678,6 +710,12 @@ let Predicates = [HasStdExtZbbOrZbp] in
 def : Pat<(RORIPat GPR:$rs1, uimmlog2xlen:$shamt),
           (RORI GPR:$rs1, uimmlog2xlen:$shamt)>;
 
+// We don't pattern-match sbclri[w], sbseti[w], sbinvi[w] because those cases
+// are already covered by the patterns for plain andi, ori, and xori.
+let Predicates = [HasStdExtZbs] in
+def : Pat<(and (srl GPR:$rs1, uimmlog2xlen:$shamt), (XLenVT 1)),
+          (SBEXTI GPR:$rs1, uimmlog2xlen:$shamt)>;
+
 let Predicates = [HasStdExtZbp, IsRV32] in {
 def : Pat<(or (or (and (srl GPR:$rs1, (i32 1)), (i32 0x55555555)), GPR:$rs1),
               (and (shl GPR:$rs1, (i32 1)), (i32 0xAAAAAAAA))),
@@ -886,6 +924,21 @@ def : Pat<(or (riscv_sllw (assertsexti32 GPR:$rs1),
           (RORW GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbbOrZbp, IsRV64]
 
+let Predicates = [HasStdExtZbs, IsRV64] in {
+def : Pat<(and (xor (riscv_sllw 1, (assertsexti32 GPR:$rs2)), -1),
+               (assertsexti32 GPR:$rs1)),
+          (SBCLRW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(or (riscv_sllw 1, (assertsexti32 GPR:$rs2)),
+              (assertsexti32 GPR:$rs1)),
+          (SBSETW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(xor (riscv_sllw 1, (assertsexti32 GPR:$rs2)),
+               (assertsexti32 GPR:$rs1)),
+          (SBINVW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(and (riscv_srlw (assertsexti32 GPR:$rs1), (assertsexti32 GPR:$rs2)),
+               1),
+          (SBEXTW GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZbs, IsRV64]
+
 let Predicates = [HasStdExtZbb, IsRV64] in {
 def : Pat<(SLOIWPat GPR:$rs1, uimmlog2xlen:$shamt),
           (SLOIW GPR:$rs1, uimmlog2xlen:$shamt)>;

diff --git a/llvm/test/CodeGen/RISCV/rv32Zbs.ll b/llvm/test/CodeGen/RISCV/rv32Zbs.ll
new file mode 100644
index 000000000000..16da34e49c66
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32Zbs.ll
@@ -0,0 +1,361 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32IB
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbs -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32IBS
+
+define i32 @sbclr_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: sbclr_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a2, zero, 1
+; RV32I-NEXT:    sll a1, a2, a1
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sbclr_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    sbclr a0, a0, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBS-LABEL: sbclr_i32:
+; RV32IBS:       # %bb.0:
+; RV32IBS-NEXT:    sbclr a0, a0, a1
+; RV32IBS-NEXT:    ret
+  %and = and i32 %b, 31
+  %shl = shl nuw i32 1, %and
+  %neg = xor i32 %shl, -1
+  %and1 = and i32 %neg, %a
+  ret i32 %and1
+}
+
+define i64 @sbclr_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: sbclr_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a3, a2, 63
+; RV32I-NEXT:    addi a4, a3, -32
+; RV32I-NEXT:    addi a3, zero, 1
+; RV32I-NEXT:    bltz a4, .LBB1_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    sll a4, a3, a4
+; RV32I-NEXT:    j .LBB1_3
+; RV32I-NEXT:  .LBB1_2:
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    sll a2, a3, a2
+; RV32I-NEXT:  .LBB1_3:
+; RV32I-NEXT:    not a3, a4
+; RV32I-NEXT:    not a2, a2
+; RV32I-NEXT:    and a0, a2, a0
+; RV32I-NEXT:    and a1, a3, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sbclr_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    andi a3, a2, 63
+; RV32IB-NEXT:    addi a4, a3, -32
+; RV32IB-NEXT:    addi a3, zero, 1
+; RV32IB-NEXT:    bltz a4, .LBB1_2
+; RV32IB-NEXT:  # %bb.1:
+; RV32IB-NEXT:    mv a2, zero
+; RV32IB-NEXT:    sll a4, a3, a4
+; RV32IB-NEXT:    j .LBB1_3
+; RV32IB-NEXT:  .LBB1_2:
+; RV32IB-NEXT:    mv a4, zero
+; RV32IB-NEXT:    sll a2, a3, a2
+; RV32IB-NEXT:  .LBB1_3:
+; RV32IB-NEXT:    andn a0, a0, a2
+; RV32IB-NEXT:    andn a1, a1, a4
+; RV32IB-NEXT:    ret
+;
+; RV32IBS-LABEL: sbclr_i64:
+; RV32IBS:       # %bb.0:
+; RV32IBS-NEXT:    andi a3, a2, 63
+; RV32IBS-NEXT:    addi a4, a3, -32
+; RV32IBS-NEXT:    addi a3, zero, 1
+; RV32IBS-NEXT:    bltz a4, .LBB1_2
+; RV32IBS-NEXT:  # %bb.1:
+; RV32IBS-NEXT:    mv a2, zero
+; RV32IBS-NEXT:    sll a4, a3, a4
+; RV32IBS-NEXT:    j .LBB1_3
+; RV32IBS-NEXT:  .LBB1_2:
+; RV32IBS-NEXT:    mv a4, zero
+; RV32IBS-NEXT:    sll a2, a3, a2
+; RV32IBS-NEXT:  .LBB1_3:
+; RV32IBS-NEXT:    not a3, a4
+; RV32IBS-NEXT:    not a2, a2
+; RV32IBS-NEXT:    and a0, a2, a0
+; RV32IBS-NEXT:    and a1, a3, a1
+; RV32IBS-NEXT:    ret
+  %and = and i64 %b, 63
+  %shl = shl nuw i64 1, %and
+  %neg = xor i64 %shl, -1
+  %and1 = and i64 %neg, %a
+  ret i64 %and1
+}
+
+define i32 @sbset_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: sbset_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a2, zero, 1
+; RV32I-NEXT:    sll a1, a2, a1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sbset_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    sbset a0, a0, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBS-LABEL: sbset_i32:
+; RV32IBS:       # %bb.0:
+; RV32IBS-NEXT:    sbset a0, a0, a1
+; RV32IBS-NEXT:    ret
+  %and = and i32 %b, 31
+  %shl = shl nuw i32 1, %and
+  %or = or i32 %shl, %a
+  ret i32 %or
+}
+
+; Since we do not directly match i64 code patterns on RV32, some i64 patterns
+; do not yet have matching bit manipulation instructions on RV32.
+; This test is kept here in case future expansions of the experimental-b
+; extension introduce instructions suitable for this pattern.
+
+define i64 @sbset_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: sbset_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a3, zero, 1
+; RV32I-NEXT:    sll a2, a3, a2
+; RV32I-NEXT:    srai a3, a2, 31
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a1, a3, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sbset_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    addi a3, zero, 1
+; RV32IB-NEXT:    sll a2, a3, a2
+; RV32IB-NEXT:    srai a3, a2, 31
+; RV32IB-NEXT:    or a0, a2, a0
+; RV32IB-NEXT:    or a1, a3, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBS-LABEL: sbset_i64:
+; RV32IBS:       # %bb.0:
+; RV32IBS-NEXT:    addi a3, zero, 1
+; RV32IBS-NEXT:    sll a2, a3, a2
+; RV32IBS-NEXT:    srai a3, a2, 31
+; RV32IBS-NEXT:    or a0, a2, a0
+; RV32IBS-NEXT:    or a1, a3, a1
+; RV32IBS-NEXT:    ret
+  %1 = trunc i64 %b to i32
+  %conv = and i32 %1, 63
+  %shl = shl nuw i32 1, %conv
+  %conv1 = sext i32 %shl to i64
+  %or = or i64 %conv1, %a
+  ret i64 %or
+}
+
+define i32 @sbinv_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: sbinv_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a2, zero, 1
+; RV32I-NEXT:    sll a1, a2, a1
+; RV32I-NEXT:    xor a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sbinv_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    sbinv a0, a0, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBS-LABEL: sbinv_i32:
+; RV32IBS:       # %bb.0:
+; RV32IBS-NEXT:    sbinv a0, a0, a1
+; RV32IBS-NEXT:    ret
+  %and = and i32 %b, 31
+  %shl = shl nuw i32 1, %and
+  %xor = xor i32 %shl, %a
+  ret i32 %xor
+}
+
+; Since we do not directly match i64 code patterns on RV32, some i64 patterns
+; do not yet have matching bit manipulation instructions on RV32.
+; This test is kept here in case future expansions of the experimental-b
+; extension introduce instructions suitable for this pattern.
+
+define i64 @sbinv_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: sbinv_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a3, zero, 1
+; RV32I-NEXT:    sll a2, a3, a2
+; RV32I-NEXT:    srai a3, a2, 31
+; RV32I-NEXT:    xor a0, a2, a0
+; RV32I-NEXT:    xor a1, a3, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sbinv_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    addi a3, zero, 1
+; RV32IB-NEXT:    sll a2, a3, a2
+; RV32IB-NEXT:    srai a3, a2, 31
+; RV32IB-NEXT:    xor a0, a2, a0
+; RV32IB-NEXT:    xor a1, a3, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBS-LABEL: sbinv_i64:
+; RV32IBS:       # %bb.0:
+; RV32IBS-NEXT:    addi a3, zero, 1
+; RV32IBS-NEXT:    sll a2, a3, a2
+; RV32IBS-NEXT:    srai a3, a2, 31
+; RV32IBS-NEXT:    xor a0, a2, a0
+; RV32IBS-NEXT:    xor a1, a3, a1
+; RV32IBS-NEXT:    ret
+  %1 = trunc i64 %b to i32
+  %conv = and i32 %1, 63
+  %shl = shl nuw i32 1, %conv
+  %conv1 = sext i32 %shl to i64
+  %xor = xor i64 %conv1, %a
+  ret i64 %xor
+}
+
+define i32 @sbext_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: sbext_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srl a0, a0, a1
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sbext_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    sbext a0, a0, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBS-LABEL: sbext_i32:
+; RV32IBS:       # %bb.0:
+; RV32IBS-NEXT:    sbext a0, a0, a1
+; RV32IBS-NEXT:    ret
+  %and = and i32 %b, 31
+  %shr = lshr i32 %a, %and
+  %and1 = and i32 %shr, 1
+  ret i32 %and1
+}
+
+; Since we do not directly match i64 code patterns on RV32, some i64 patterns
+; do not yet have matching bit manipulation instructions on RV32.
+; This test is kept here in case future expansions of the experimental-b
+; extension introduce instructions suitable for this pattern.
+
+define i64 @sbext_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: sbext_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a3, a2, 63
+; RV32I-NEXT:    addi a4, a3, -32
+; RV32I-NEXT:    bltz a4, .LBB7_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    srl a0, a1, a4
+; RV32I-NEXT:    j .LBB7_3
+; RV32I-NEXT:  .LBB7_2:
+; RV32I-NEXT:    srl a0, a0, a2
+; RV32I-NEXT:    addi a2, zero, 31
+; RV32I-NEXT:    sub a2, a2, a3
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    sll a1, a1, a2
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:  .LBB7_3:
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sbext_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    andi a3, a2, 63
+; RV32IB-NEXT:    addi a4, a3, -32
+; RV32IB-NEXT:    bltz a4, .LBB7_2
+; RV32IB-NEXT:  # %bb.1:
+; RV32IB-NEXT:    srl a0, a1, a4
+; RV32IB-NEXT:    j .LBB7_3
+; RV32IB-NEXT:  .LBB7_2:
+; RV32IB-NEXT:    srl a0, a0, a2
+; RV32IB-NEXT:    addi a2, zero, 31
+; RV32IB-NEXT:    sub a2, a2, a3
+; RV32IB-NEXT:    slli a1, a1, 1
+; RV32IB-NEXT:    sll a1, a1, a2
+; RV32IB-NEXT:    or a0, a0, a1
+; RV32IB-NEXT:  .LBB7_3:
+; RV32IB-NEXT:    andi a0, a0, 1
+; RV32IB-NEXT:    mv a1, zero
+; RV32IB-NEXT:    ret
+;
+; RV32IBS-LABEL: sbext_i64:
+; RV32IBS:       # %bb.0:
+; RV32IBS-NEXT:    andi a3, a2, 63
+; RV32IBS-NEXT:    addi a4, a3, -32
+; RV32IBS-NEXT:    bltz a4, .LBB7_2
+; RV32IBS-NEXT:  # %bb.1:
+; RV32IBS-NEXT:    srl a0, a1, a4
+; RV32IBS-NEXT:    j .LBB7_3
+; RV32IBS-NEXT:  .LBB7_2:
+; RV32IBS-NEXT:    srl a0, a0, a2
+; RV32IBS-NEXT:    addi a2, zero, 31
+; RV32IBS-NEXT:    sub a2, a2, a3
+; RV32IBS-NEXT:    slli a1, a1, 1
+; RV32IBS-NEXT:    sll a1, a1, a2
+; RV32IBS-NEXT:    or a0, a0, a1
+; RV32IBS-NEXT:  .LBB7_3:
+; RV32IBS-NEXT:    andi a0, a0, 1
+; RV32IBS-NEXT:    mv a1, zero
+; RV32IBS-NEXT:    ret
+  %conv = and i64 %b, 63
+  %shr = lshr i64 %a, %conv
+  %and1 = and i64 %shr, 1
+  ret i64 %and1
+}
+
+define i32 @sbexti_i32(i32 %a) nounwind {
+; RV32I-LABEL: sbexti_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a0, a0, 5
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sbexti_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    sbexti a0, a0, 5
+; RV32IB-NEXT:    ret
+;
+; RV32IBS-LABEL: sbexti_i32:
+; RV32IBS:       # %bb.0:
+; RV32IBS-NEXT:    sbexti a0, a0, 5
+; RV32IBS-NEXT:    ret
+  %shr = lshr i32 %a, 5
+  %and = and i32 %shr, 1
+  ret i32 %and
+}
+
+define i64 @sbexti_i64(i64 %a) nounwind {
+; RV32I-LABEL: sbexti_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a0, a0, 5
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: sbexti_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    sbexti a0, a0, 5
+; RV32IB-NEXT:    mv a1, zero
+; RV32IB-NEXT:    ret
+;
+; RV32IBS-LABEL: sbexti_i64:
+; RV32IBS:       # %bb.0:
+; RV32IBS-NEXT:    sbexti a0, a0, 5
+; RV32IBS-NEXT:    mv a1, zero
+; RV32IBS-NEXT:    ret
+  %shr = lshr i64 %a, 5
+  %and = and i64 %shr, 1
+  ret i64 %and
+}

diff --git a/llvm/test/CodeGen/RISCV/rv64Zbs.ll b/llvm/test/CodeGen/RISCV/rv64Zbs.ll
new file mode 100644
index 000000000000..f7990b36dec8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64Zbs.ll
@@ -0,0 +1,235 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64IB
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbs -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64IBS
+
+define signext i32 @sbclr_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sbclr_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    sllw a1, a2, a1
+; RV64I-NEXT:    not a1, a1
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbclr_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbclrw a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbclr_i32:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    sbclrw a0, a0, a1
+; RV64IBS-NEXT:    ret
+  %and = and i32 %b, 31
+  %shl = shl nuw i32 1, %and
+  %neg = xor i32 %shl, -1
+  %and1 = and i32 %neg, %a
+  ret i32 %and1
+}
+
+define i64 @sbclr_i64(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: sbclr_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    sll a1, a2, a1
+; RV64I-NEXT:    not a1, a1
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbclr_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbclr a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbclr_i64:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    sbclr a0, a0, a1
+; RV64IBS-NEXT:    ret
+  %and = and i64 %b, 63
+  %shl = shl nuw i64 1, %and
+  %neg = xor i64 %shl, -1
+  %and1 = and i64 %neg, %a
+  ret i64 %and1
+}
+
+define signext i32 @sbset_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sbset_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    sllw a1, a2, a1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbset_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbsetw a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbset_i32:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    sbsetw a0, a0, a1
+; RV64IBS-NEXT:    ret
+  %and = and i32 %b, 31
+  %shl = shl nuw i32 1, %and
+  %or = or i32 %shl, %a
+  ret i32 %or
+}
+
+define i64 @sbset_i64(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: sbset_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    sll a1, a2, a1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbset_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbset a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbset_i64:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    sbset a0, a0, a1
+; RV64IBS-NEXT:    ret
+  %conv = and i64 %b, 63
+  %shl = shl nuw i64 1, %conv
+  %or = or i64 %shl, %a
+  ret i64 %or
+}
+
+define signext i32 @sbinv_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sbinv_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    sllw a1, a2, a1
+; RV64I-NEXT:    xor a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbinv_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbinvw a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbinv_i32:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    sbinvw a0, a0, a1
+; RV64IBS-NEXT:    ret
+  %and = and i32 %b, 31
+  %shl = shl nuw i32 1, %and
+  %xor = xor i32 %shl, %a
+  ret i32 %xor
+}
+
+define i64 @sbinv_i64(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: sbinv_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    sll a1, a2, a1
+; RV64I-NEXT:    xor a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbinv_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbinv a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbinv_i64:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    sbinv a0, a0, a1
+; RV64IBS-NEXT:    ret
+  %conv = and i64 %b, 63
+  %shl = shl nuw i64 1, %conv
+  %xor = xor i64 %shl, %a
+  ret i64 %xor
+}
+
+define signext i32 @sbext_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sbext_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbext_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbextw a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbext_i32:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    sbextw a0, a0, a1
+; RV64IBS-NEXT:    ret
+  %and = and i32 %b, 31
+  %shr = lshr i32 %a, %and
+  %and1 = and i32 %shr, 1
+  ret i32 %and1
+}
+
+define i64 @sbext_i64(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: sbext_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbext_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbext a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbext_i64:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    sbext a0, a0, a1
+; RV64IBS-NEXT:    ret
+  %conv = and i64 %b, 63
+  %shr = lshr i64 %a, %conv
+  %and1 = and i64 %shr, 1
+  ret i64 %and1
+}
+
+define signext i32 @sbexti_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: sbexti_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 5
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbexti_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbexti a0, a0, 5
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbexti_i32:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    sbexti a0, a0, 5
+; RV64IBS-NEXT:    ret
+  %shr = lshr i32 %a, 5
+  %and = and i32 %shr, 1
+  ret i32 %and
+}
+
+define i64 @sbexti_i64(i64 %a) nounwind {
+; RV64I-LABEL: sbexti_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 5
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: sbexti_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    sbexti a0, a0, 5
+; RV64IB-NEXT:    ret
+;
+; RV64IBS-LABEL: sbexti_i64:
+; RV64IBS:       # %bb.0:
+; RV64IBS-NEXT:    sbexti a0, a0, 5
+; RV64IBS-NEXT:    ret
+  %shr = lshr i64 %a, 5
+  %and = and i64 %shr, 1
+  ret i64 %and
+}

More information about the llvm-commits mailing list