[llvm-branch-commits] [llvm] 4d5aa76 - [RISCV] Add support for rev8 and orc.b to Zbb.

Craig Topper via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Fri Jan 22 12:56:21 PST 2021


Author: Craig Topper
Date: 2021-01-22T12:49:10-08:00
New Revision: 4d5aa760a7d78b601fcfbda4d6196091a9188ea6

URL: https://github.com/llvm/llvm-project/commit/4d5aa760a7d78b601fcfbda4d6196091a9188ea6
DIFF: https://github.com/llvm/llvm-project/commit/4d5aa760a7d78b601fcfbda4d6196091a9188ea6.diff

LOG: [RISCV] Add support for rev8 and orc.b to Zbb.

These instructions use a portion of the encodings for grevi and
gorci. The full encodings are only supported with Zbp. Note that
rev8 has different encodings on rv32 and rv64.
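
For example, the equivalences exercised by the MC tests below are:

    # RV32: rev8 rd, rs takes the grevi encoding with shamt 24
    rev8 t0, t1    # encoding: [0x93,0x52,0x83,0x69]
    # RV64: rev8 rd, rs takes the grevi encoding with shamt 56
    rev8 t0, t1    # encoding: [0x93,0x52,0x83,0x6b]
    # RV32/RV64: orc.b rd, rs takes the gorci encoding with shamt 7
    orc.b t0, t1   # encoding: [0x93,0x52,0x73,0x28]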

Zbb is closer to being finalized than Zbp, which has motivated
some decisions in this patch.

I'm treating rev8 and orc.b as separate instructions when
either Zbb or Zbp is enabled. This allows the printed diagnostic
to suggest that either feature can be enabled to support these
mnemonics.
I had tried to put HasStdExtZbbAndNotZbp on the Zbb instructions,
but that caused a diagnostic saying Zbp is required when neither
feature is enabled. We should really mention Zbb since it's closer
to final.

This does require extra isel patterns for the different cases so
that bswap always prints as rev8 in the assembly listing, since
we can't use an InstAlias.
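
For example, under the RV32IBB configuration in rv32Zbb.ll below,
bswap_i32 now selects to a single instruction:

    # bswap_i32 with Zbb (RV32IBB check lines)
    rev8 a0, a0
    ret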

llvm-objdump disassembly will always pick the rev8 or orc.b
instructions. llvm-mc parsing and printing of text will not convert
the grevi/gorci spellings to rev8/orc.b. We could probably fix
this with a special case in processInstruction in the assembly
parser if it's important.
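
The new Zbp MC test lines below show the current behavior for the
explicit grevi/gorci spellings:

    # from rv32zbp-valid.s
    grevi t0, t1, 24   # llvm-mc prints:      grevi t0, t1, 24
                       # llvm-objdump prints: rev8 t0, t1
    gorci t0, t1, 7    # llvm-mc prints:      gorci t0, t1, 7
                       # llvm-objdump prints: orc.b t0, t1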

Reviewed By: asb, frasercrmck

Differential Revision: https://reviews.llvm.org/D94944

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoB.td
    llvm/test/CodeGen/RISCV/rv32Zbb.ll
    llvm/test/CodeGen/RISCV/rv64Zbb.ll
    llvm/test/MC/RISCV/rv32b-aliases-valid.s
    llvm/test/MC/RISCV/rv32zbb-valid.s
    llvm/test/MC/RISCV/rv32zbp-valid.s
    llvm/test/MC/RISCV/rv64b-aliases-valid.s
    llvm/test/MC/RISCV/rv64zbb-valid.s
    llvm/test/MC/RISCV/rv64zbp-valid.s

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f78e0a236b63..a24e10318c78 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -224,6 +224,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   }
 
   if (Subtarget.hasStdExtZbp()) {
+    // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
+    // more combining.
     setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
     setOperationAction(ISD::BSWAP, XLenVT, Custom);
 
@@ -232,7 +234,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::BSWAP, MVT::i32, Custom);
     }
   } else {
-    setOperationAction(ISD::BSWAP, XLenVT, Expand);
+    // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
+    // pattern match it directly in isel.
+    setOperationAction(ISD::BSWAP, XLenVT,
+                       Subtarget.hasStdExtZbb() ? Legal : Expand);
   }
 
   if (Subtarget.hasStdExtZbb()) {

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
index 74660973e805..5a733c4bf771 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -481,6 +481,37 @@ def ZEXTH_RV64 : RVInstR<0b0000100, 0b100, OPC_OP_32, (outs GPR:$rd),
 }
 } // Predicates = [HasStdExtZbbOrZbp, IsRV64]
 
+// We treat rev8 and orc.b as standalone instructions even though they use a
+// portion of the encodings for grevi and gorci. This allows us to support only
+// those encodings when only Zbb is enabled. We do this even when grevi and
+// gorci are available with Zbp. Trying to use 'HasStdExtZbb, NotHasStdExtZbp'
+// causes diagnostics to suggest that Zbp rather than Zbb is required for rev8
+// or gorci. Since Zbb is closer to being finalized than Zbp this will be
+// misleading to users.
+let Predicates = [HasStdExtZbbOrZbp, IsRV32] in {
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def REV8_RV32 : RVInstI<0b101, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1),
+                        "rev8", "$rd, $rs1">, Sched<[]> {
+  let imm12 = { 0b01101, 0b0011000 };
+}
+} // Predicates = [HasStdExtZbbOrZbp, IsRV32]
+
+let Predicates = [HasStdExtZbbOrZbp, IsRV64] in {
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def REV8_RV64 : RVInstI<0b101, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1),
+                        "rev8", "$rd, $rs1">, Sched<[]> {
+  let imm12 = { 0b01101, 0b0111000 };
+}
+} // Predicates = [HasStdExtZbbOrZbp, IsRV64]
+
+let Predicates = [HasStdExtZbbOrZbp] in {
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def ORCB : RVInstI<0b101, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1),
+                   "orc.b", "$rd, $rs1">, Sched<[]> {
+  let imm12 = { 0b00101, 0b0000111 };
+}
+} // Predicates = [HasStdExtZbbOrZbp]
+
 //===----------------------------------------------------------------------===//
 // Future compressed instructions
 //===----------------------------------------------------------------------===//
@@ -552,7 +583,7 @@ def : InstAlias<"orc2.n $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b00010)>;
 def : InstAlias<"orc.n $rd, $rs",  (GORCI GPR:$rd, GPR:$rs, 0b00011)>;
 def : InstAlias<"orc4.b $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b00100)>;
 def : InstAlias<"orc2.b $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b00110)>;
-def : InstAlias<"orc.b $rd, $rs",  (GORCI GPR:$rd, GPR:$rs, 0b00111)>;
+// orc.b is considered an instruction rather than an alias.
 def : InstAlias<"orc8.h $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b01000)>;
 def : InstAlias<"orc4.h $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b01100)>;
 def : InstAlias<"orc2.h $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b01110)>;
@@ -561,7 +592,7 @@ def : InstAlias<"orc.h $rd, $rs",  (GORCI GPR:$rd, GPR:$rs, 0b01111)>;
 
 let Predicates = [HasStdExtZbp, IsRV32] in {
 def : InstAlias<"rev16 $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b10000)>;
-def : InstAlias<"rev8 $rd, $rs",  (GREVI GPR:$rd, GPR:$rs, 0b11000)>;
+// rev8 is considered an instruction rather than an alias.
 def : InstAlias<"rev4 $rd, $rs",  (GREVI GPR:$rd, GPR:$rs, 0b11100)>;
 def : InstAlias<"rev2 $rd, $rs",  (GREVI GPR:$rd, GPR:$rs, 0b11110)>;
 def : InstAlias<"rev $rd, $rs",   (GREVI GPR:$rd, GPR:$rs, 0b11111)>;
@@ -590,7 +621,7 @@ def : InstAlias<"rev2.w $rd, $rs",  (GREVI GPR:$rd, GPR:$rs, 0b011110)>;
 def : InstAlias<"rev.w $rd, $rs",   (GREVI GPR:$rd, GPR:$rs, 0b011111)>;
 def : InstAlias<"rev32 $rd, $rs",   (GREVI GPR:$rd, GPR:$rs, 0b100000)>;
 def : InstAlias<"rev16 $rd, $rs",   (GREVI GPR:$rd, GPR:$rs, 0b110000)>;
-def : InstAlias<"rev8 $rd, $rs",    (GREVI GPR:$rd, GPR:$rs, 0b111000)>;
+// rev8 is considered an instruction rather than an alias.
 def : InstAlias<"rev4 $rd, $rs",    (GREVI GPR:$rd, GPR:$rs, 0b111100)>;
 def : InstAlias<"rev2 $rd, $rs",    (GREVI GPR:$rd, GPR:$rs, 0b111110)>;
 def : InstAlias<"rev $rd, $rs",     (GREVI GPR:$rd, GPR:$rs, 0b111111)>;
@@ -716,13 +747,24 @@ def riscv_gorciw   : SDNode<"RISCVISD::GORCIW", SDTIntBinOp, []>;
 let Predicates = [HasStdExtZbp] in {
 def : Pat<(riscv_grevi GPR:$rs1, timm:$shamt), (GREVI GPR:$rs1, timm:$shamt)>;
 def : Pat<(riscv_gorci GPR:$rs1, timm:$shamt), (GORCI GPR:$rs1, timm:$shamt)>;
+
+// We treat orc.b as a separate instruction, so match it directly.
+def : Pat<(riscv_gorci GPR:$rs1, (XLenVT 7)), (ORCB GPR:$rs1)>;
 } // Predicates = [HasStdExtZbp]
 
 let Predicates = [HasStdExtZbp, IsRV32] in {
 def : Pat<(rotr (riscv_grevi GPR:$rs1, (i32 24)), (i32 16)), (GREVI GPR:$rs1, 8)>;
 def : Pat<(rotl (riscv_grevi GPR:$rs1, (i32 24)), (i32 16)), (GREVI GPR:$rs1, 8)>;
+
+// We treat rev8 as a separate instruction, so match it directly.
+def : Pat<(riscv_grevi GPR:$rs1, (i32 24)), (REV8_RV32 GPR:$rs1)>;
 } // Predicates = [HasStdExtZbp, IsRV32]
 
+let Predicates = [HasStdExtZbp, IsRV64] in {
+// We treat rev8 as a separate instruction, so match it directly.
+def : Pat<(riscv_grevi GPR:$rs1, (i64 56)), (REV8_RV64 GPR:$rs1)>;
+} // Predicates = [HasStdExtZbp, IsRV64]
+
 let Predicates = [HasStdExtZbt] in {
 def : Pat<(or (and (not GPR:$rs2), GPR:$rs3), (and GPR:$rs2, GPR:$rs1)),
           (CMIX GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
@@ -796,6 +838,14 @@ def : Pat<(umin GPR:$rs1, GPR:$rs2), (MINU GPR:$rs1, GPR:$rs2)>;
 def : Pat<(umax GPR:$rs1, GPR:$rs2), (MAXU GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbb]
 
+let Predicates = [HasStdExtZbb, IsRV32] in {
+def : Pat<(bswap GPR:$rs1), (REV8_RV32 GPR:$rs1)>;
+} // Predicates = [HasStdExtZbb, IsRV32]
+
+let Predicates = [HasStdExtZbb, IsRV64] in {
+def : Pat<(bswap GPR:$rs1), (REV8_RV64 GPR:$rs1)>;
+} // Predicates = [HasStdExtZbb, IsRV64]
+
 let Predicates = [HasStdExtZbp, IsRV32] in
 def : Pat<(or (and GPR:$rs1, 0x0000FFFF), (shl GPR:$rs2, (i32 16))),
           (PACK GPR:$rs1, GPR:$rs2)>;

diff --git a/llvm/test/CodeGen/RISCV/rv32Zbb.ll b/llvm/test/CodeGen/RISCV/rv32Zbb.ll
index 8d773d2a7ac0..1ab4dc5c430f 100644
--- a/llvm/test/CodeGen/RISCV/rv32Zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32Zbb.ll
@@ -962,3 +962,81 @@ define i64 @zexth_i64(i64 %a) nounwind {
   %and = and i64 %a, 65535
   ret i64 %and
 }
+
+declare i32 @llvm.bswap.i32(i32)
+
+define i32 @bswap_i32(i32 %a) nounwind {
+; RV32I-LABEL: bswap_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -256
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 24
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    slli a2, a0, 8
+; RV32I-NEXT:    lui a3, 4080
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: bswap_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    rev8 a0, a0
+; RV32IB-NEXT:    ret
+;
+; RV32IBB-LABEL: bswap_i32:
+; RV32IBB:       # %bb.0:
+; RV32IBB-NEXT:    rev8 a0, a0
+; RV32IBB-NEXT:    ret
+  %1 = tail call i32 @llvm.bswap.i32(i32 %a)
+  ret i32 %1
+}
+
+declare i64 @llvm.bswap.i64(i64)
+
+define i64 @bswap_i64(i64 %a) {
+; RV32I-LABEL: bswap_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a1, 8
+; RV32I-NEXT:    lui a3, 16
+; RV32I-NEXT:    addi a3, a3, -256
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    srli a4, a1, 24
+; RV32I-NEXT:    or a2, a2, a4
+; RV32I-NEXT:    slli a4, a1, 8
+; RV32I-NEXT:    lui a5, 4080
+; RV32I-NEXT:    and a4, a4, a5
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    or a1, a1, a4
+; RV32I-NEXT:    or a2, a1, a2
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    srli a3, a0, 24
+; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    slli a3, a0, 8
+; RV32I-NEXT:    and a3, a3, a5
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a1, a0, a1
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: bswap_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    rev8 a2, a1
+; RV32IB-NEXT:    rev8 a1, a0
+; RV32IB-NEXT:    mv a0, a2
+; RV32IB-NEXT:    ret
+;
+; RV32IBB-LABEL: bswap_i64:
+; RV32IBB:       # %bb.0:
+; RV32IBB-NEXT:    rev8 a2, a1
+; RV32IBB-NEXT:    rev8 a1, a0
+; RV32IBB-NEXT:    mv a0, a2
+; RV32IBB-NEXT:    ret
+  %1 = call i64 @llvm.bswap.i64(i64 %a)
+  ret i64 %1
+}

diff --git a/llvm/test/CodeGen/RISCV/rv64Zbb.ll b/llvm/test/CodeGen/RISCV/rv64Zbb.ll
index 5fd8135e6786..64074a31bea4 100644
--- a/llvm/test/CodeGen/RISCV/rv64Zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64Zbb.ll
@@ -819,3 +819,127 @@ define i64 @zexth_i64(i64 %a) nounwind {
   %and = and i64 %a, 65535
   ret i64 %and
 }
+
+declare i32 @llvm.bswap.i32(i32)
+
+define signext i32 @bswap_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: bswap_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 8
+; RV64I-NEXT:    addi a2, zero, 255
+; RV64I-NEXT:    slli a3, a2, 32
+; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    slli a3, a0, 24
+; RV64I-NEXT:    slli a4, a2, 40
+; RV64I-NEXT:    and a3, a3, a4
+; RV64I-NEXT:    or a1, a3, a1
+; RV64I-NEXT:    slli a3, a0, 40
+; RV64I-NEXT:    slli a2, a2, 48
+; RV64I-NEXT:    and a2, a3, a2
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srai a0, a0, 32
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: bswap_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    greviw a0, a0, 24
+; RV64IB-NEXT:    ret
+;
+; RV64IBB-LABEL: bswap_i32:
+; RV64IBB:       # %bb.0:
+; RV64IBB-NEXT:    rev8 a0, a0
+; RV64IBB-NEXT:    srai a0, a0, 32
+; RV64IBB-NEXT:    ret
+  %1 = tail call i32 @llvm.bswap.i32(i32 %a)
+  ret i32 %1
+}
+
+; Similar to bswap_i32 but the result is not sign extended.
+define void @bswap_i32_nosext(i32 signext %a, i32* %x) nounwind {
+; RV64I-LABEL: bswap_i32_nosext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    addi a3, zero, 255
+; RV64I-NEXT:    slli a4, a3, 32
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 24
+; RV64I-NEXT:    slli a5, a3, 40
+; RV64I-NEXT:    and a4, a4, a5
+; RV64I-NEXT:    or a2, a4, a2
+; RV64I-NEXT:    slli a4, a0, 40
+; RV64I-NEXT:    slli a3, a3, 48
+; RV64I-NEXT:    and a3, a4, a3
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    or a0, a0, a3
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sw a0, 0(a1)
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: bswap_i32_nosext:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    greviw a0, a0, 24
+; RV64IB-NEXT:    sw a0, 0(a1)
+; RV64IB-NEXT:    ret
+;
+; RV64IBB-LABEL: bswap_i32_nosext:
+; RV64IBB:       # %bb.0:
+; RV64IBB-NEXT:    rev8 a0, a0
+; RV64IBB-NEXT:    srli a0, a0, 32
+; RV64IBB-NEXT:    sw a0, 0(a1)
+; RV64IBB-NEXT:    ret
+  %1 = tail call i32 @llvm.bswap.i32(i32 %a)
+  store i32 %1, i32* %x
+  ret void
+}
+
+declare i64 @llvm.bswap.i64(i64)
+
+define i64 @bswap_i64(i64 %a) {
+; RV64I-LABEL: bswap_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a1, a0, 24
+; RV64I-NEXT:    lui a2, 4080
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 8
+; RV64I-NEXT:    addi a3, zero, 255
+; RV64I-NEXT:    slli a4, a3, 24
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    or a1, a2, a1
+; RV64I-NEXT:    srli a2, a0, 40
+; RV64I-NEXT:    lui a4, 16
+; RV64I-NEXT:    addiw a4, a4, -256
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    srli a4, a0, 56
+; RV64I-NEXT:    or a2, a2, a4
+; RV64I-NEXT:    or a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    slli a4, a3, 32
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 24
+; RV64I-NEXT:    slli a5, a3, 40
+; RV64I-NEXT:    and a4, a4, a5
+; RV64I-NEXT:    or a2, a4, a2
+; RV64I-NEXT:    slli a4, a0, 40
+; RV64I-NEXT:    slli a3, a3, 48
+; RV64I-NEXT:    and a3, a4, a3
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    or a0, a0, a3
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: bswap_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    rev8 a0, a0
+; RV64IB-NEXT:    ret
+;
+; RV64IBB-LABEL: bswap_i64:
+; RV64IBB:       # %bb.0:
+; RV64IBB-NEXT:    rev8 a0, a0
+; RV64IBB-NEXT:    ret
+  %1 = call i64 @llvm.bswap.i64(i64 %a)
+  ret i64 %1
+}

diff --git a/llvm/test/MC/RISCV/rv32b-aliases-valid.s b/llvm/test/MC/RISCV/rv32b-aliases-valid.s
index a291c381109a..c5ea0ca697be 100644
--- a/llvm/test/MC/RISCV/rv32b-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv32b-aliases-valid.s
@@ -67,7 +67,7 @@ rev.h x5, x6
 # CHECK-S-OBJ: rev16 t0, t1
 rev16 x5, x6
 
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 24
+# CHECK-S-OBJ-NOALIAS: rev8 t0, t1
 # CHECK-S-OBJ: rev8 t0, t1
 rev8 x5, x6
 
@@ -183,7 +183,7 @@ orc4.b x5, x6
 # CHECK-S-OBJ: orc2.b t0, t1
 orc2.b x5, x6
 
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 7
+# CHECK-S-OBJ-NOALIAS: orc.b t0, t1
 # CHECK-S-OBJ: orc.b t0, t1
 orc.b x5, x6
 

diff --git a/llvm/test/MC/RISCV/rv32zbb-valid.s b/llvm/test/MC/RISCV/rv32zbb-valid.s
index 28c38436274e..a756814dc5bc 100644
--- a/llvm/test/MC/RISCV/rv32zbb-valid.s
+++ b/llvm/test/MC/RISCV/rv32zbb-valid.s
@@ -42,3 +42,9 @@ maxu t0, t1, t2
 # CHECK-ASM-AND-OBJ: zext.h t0, t1
 # CHECK-ASM: encoding: [0xb3,0x42,0x03,0x08]
 zext.h t0, t1
+# CHECK-ASM-AND-OBJ: rev8 t0, t1
+# CHECK-ASM: encoding: [0x93,0x52,0x83,0x69]
+rev8 t0, t1
+# CHECK-ASM-AND-OBJ: orc.b t0, t1
+# CHECK-ASM: encoding: [0x93,0x52,0x73,0x28]
+orc.b t0, t1

diff --git a/llvm/test/MC/RISCV/rv32zbp-valid.s b/llvm/test/MC/RISCV/rv32zbp-valid.s
index 270c8e242193..3ca28573addd 100644
--- a/llvm/test/MC/RISCV/rv32zbp-valid.s
+++ b/llvm/test/MC/RISCV/rv32zbp-valid.s
@@ -64,3 +64,17 @@ zext.h t0, t1
 # CHECK-OBJ: zext.h t0, t1
 # CHECK-ASM: encoding: [0xb3,0x42,0x03,0x08]
 pack t0, t1, x0
+# CHECK-ASM-AND-OBJ: rev8 t0, t1
+# CHECK-ASM: encoding: [0x93,0x52,0x83,0x69]
+rev8 t0, t1
+# CHECK-ASM: grevi t0, t1, 24
+# CHECK-OBJ: rev8 t0, t1
+# CHECK-ASM: encoding: [0x93,0x52,0x83,0x69]
+grevi t0, t1, 24
+# CHECK-ASM-AND-OBJ: orc.b t0, t1
+# CHECK-ASM: encoding: [0x93,0x52,0x73,0x28]
+orc.b t0, t1
+# CHECK-ASM: gorci t0, t1, 7
+# CHECK-OBJ: orc.b t0, t1
+# CHECK-ASM: encoding: [0x93,0x52,0x73,0x28]
+gorci t0, t1, 7

diff --git a/llvm/test/MC/RISCV/rv64b-aliases-valid.s b/llvm/test/MC/RISCV/rv64b-aliases-valid.s
index eb9b7b66986c..203bcbbe7ab5 100644
--- a/llvm/test/MC/RISCV/rv64b-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv64b-aliases-valid.s
@@ -95,7 +95,7 @@ rev32 x5, x6
 # CHECK-S-OBJ: rev16 t0, t1
 rev16 x5, x6
 
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 56
+# CHECK-S-OBJ-NOALIAS: rev8 t0, t1
 # CHECK-S-OBJ: rev8 t0, t1
 rev8 x5, x6
 
@@ -251,7 +251,7 @@ orc4.b x5, x6
 # CHECK-S-OBJ: orc2.b t0, t1
 orc2.b x5, x6
 
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 7
+# CHECK-S-OBJ-NOALIAS: orc.b t0, t1
 # CHECK-S-OBJ: orc.b t0, t1
 orc.b x5, x6
 

diff --git a/llvm/test/MC/RISCV/rv64zbb-valid.s b/llvm/test/MC/RISCV/rv64zbb-valid.s
index c2809ea0ca72..9e4678573298 100644
--- a/llvm/test/MC/RISCV/rv64zbb-valid.s
+++ b/llvm/test/MC/RISCV/rv64zbb-valid.s
@@ -24,3 +24,9 @@ cpopw t0, t1
 # CHECK-ASM-AND-OBJ: zext.h t0, t1
 # CHECK-ASM: encoding: [0xbb,0x42,0x03,0x08]
 zext.h t0, t1
+# CHECK-ASM-AND-OBJ: rev8 t0, t1
+# CHECK-ASM: encoding: [0x93,0x52,0x83,0x6b]
+rev8 t0, t1
+# CHECK-ASM-AND-OBJ: orc.b t0, t1
+# CHECK-ASM: encoding: [0x93,0x52,0x73,0x28]
+orc.b t0, t1

diff --git a/llvm/test/MC/RISCV/rv64zbp-valid.s b/llvm/test/MC/RISCV/rv64zbp-valid.s
index 9a6cca1e62b3..ec4e918173b3 100644
--- a/llvm/test/MC/RISCV/rv64zbp-valid.s
+++ b/llvm/test/MC/RISCV/rv64zbp-valid.s
@@ -55,3 +55,17 @@ zext.h t0, t1
 # CHECK-OBJ: zext.h t0, t1
 # CHECK-ASM: encoding: [0xbb,0x42,0x03,0x08]
 packw t0, t1, x0
+# CHECK-ASM-AND-OBJ: rev8 t0, t1
+# CHECK-ASM: encoding: [0x93,0x52,0x83,0x6b]
+rev8 t0, t1
+# CHECK-ASM: grevi t0, t1, 56
+# CHECK-OBJ: rev8 t0, t1
+# CHECK-ASM: encoding: [0x93,0x52,0x83,0x6b]
+grevi t0, t1, 56
+# CHECK-ASM-AND-OBJ: orc.b t0, t1
+# CHECK-ASM: encoding: [0x93,0x52,0x73,0x28]
+orc.b t0, t1
+# CHECK-ASM: gorci t0, t1, 7
+# CHECK-OBJ: orc.b t0, t1
+# CHECK-ASM: encoding: [0x93,0x52,0x73,0x28]
+gorci t0, t1, 7




More information about the llvm-branch-commits mailing list