[llvm] 615d71d - [RISCV][CodeGen] Implement IR Intrinsic support for K extension

Wu Xinlong via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 26 23:53:43 PST 2022


Author: Wu Xinlong
Date: 2022-01-27T15:53:35+08:00
New Revision: 615d71d9a3400fe66fa066c7ef63a0a467171810

URL: https://github.com/llvm/llvm-project/commit/615d71d9a3400fe66fa066c7ef63a0a467171810
DIFF: https://github.com/llvm/llvm-project/commit/615d71d9a3400fe66fa066c7ef63a0a467171810.diff

LOG: [RISCV][CodeGen] Implement IR Intrinsic support for K extension

This revision implements IR intrinsic support for the RISC-V Scalar Cryptography extension, according to version [[ https://github.com/riscv/riscv-crypto/releases/tag/v1.0.0-scalar | 1.0 ]] of the specification.
Co-authors: @ksyx, @VincentWu, @lihongliang, and @achieveartificialintelligence

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D102310
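
As a minimal sketch of what this enables (the function name here is
illustrative; the intrinsic and its lowering are defined in the patch
below), compiling the following IR with llc -mtriple=riscv64 -mattr=+zknh
should select a single sha256sig0 instruction:

    declare i64 @llvm.riscv.sha256sig0.i64(i64)

    define i64 @sha256sig0(i64 %a) nounwind {
      %v = call i64 @llvm.riscv.sha256sig0.i64(i64 %a)
      ret i64 %v
    }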

Added: 
    llvm/test/CodeGen/RISCV/rv32zbb-zbp-zbkb.ll
    llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv32zbp-zbkb.ll
    llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv64zbb-zbp-zbkb.ll
    llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv64zbp-zbkb.ll
    llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCV.td
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
    llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv32zbp.ll
    llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv64zbp.ll

Removed: 
    llvm/test/CodeGen/RISCV/rv32zbb-zbp.ll
    llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll


################################################################################
diff  --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index fc697f5c71e85..6780436bd701a 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -88,9 +88,11 @@ let TargetPrefix = "riscv" in {
   // Zbb
   def int_riscv_orc_b : BitManipGPRIntrinsics;
 
-  // Zbc
+  // Zbc or Zbkc
   def int_riscv_clmul  : BitManipGPRGPRIntrinsics;
   def int_riscv_clmulh : BitManipGPRGPRIntrinsics;
+
+  // Zbc
   def int_riscv_clmulr : BitManipGPRGPRIntrinsics;
 
   // Zbe
@@ -123,6 +125,15 @@ let TargetPrefix = "riscv" in {
   // Zbt
   def int_riscv_fsl : BitManipGPRGPRGRIntrinsics;
   def int_riscv_fsr : BitManipGPRGPRGRIntrinsics;
+
+  // Zbkb
+  def int_riscv_brev8 : BitManipGPRIntrinsics;
+  def int_riscv_zip   : BitManipGPRIntrinsics;
+  def int_riscv_unzip : BitManipGPRIntrinsics;
+
+  // Zbkx
+  def int_riscv_xperm4  : BitManipGPRGPRIntrinsics;
+  def int_riscv_xperm8  : BitManipGPRGPRIntrinsics;
 } // TargetPrefix = "riscv"
 
 //===----------------------------------------------------------------------===//
@@ -1453,3 +1464,92 @@ let TargetPrefix = "riscv" in {
                      llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                     [NoCapture<ArgIndex<1>>, IntrWriteMem]>;
 } // TargetPrefix = "riscv"
+
+//===----------------------------------------------------------------------===//
+// Scalar Cryptography
+//
+// These intrinsics will lower directly into the corresponding instructions
+// added by the scalar cryptography extension, if the extension is present.
+
+let TargetPrefix = "riscv" in {
+
+class ScalarCryptoGprIntrinsicAny
+    : Intrinsic<[llvm_anyint_ty],
+                [LLVMMatchType<0>],
+                [IntrNoMem, IntrSpeculatable]>;
+
+class ScalarCryptoByteSelect32
+    : Intrinsic<[llvm_i32_ty],
+                [llvm_i32_ty, llvm_i32_ty, llvm_i8_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable,
+                 ImmArg<ArgIndex<2>>]>;
+
+class ScalarCryptoGprGprIntrinsic32
+    : Intrinsic<[llvm_i32_ty],
+                [llvm_i32_ty, llvm_i32_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCryptoGprGprIntrinsic64
+    : Intrinsic<[llvm_i64_ty],
+                [llvm_i64_ty, llvm_i64_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCryptoGprIntrinsic64
+    : Intrinsic<[llvm_i64_ty],
+                [llvm_i64_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCryptoByteSelectAny
+    : Intrinsic<[llvm_anyint_ty],
+                [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i8_ty],
+                [IntrNoMem, IntrSpeculatable, IntrWillReturn,
+                 ImmArg<ArgIndex<2>>, Returned<ArgIndex<0>>]>;
+
+// Zknd
+def int_riscv_aes32dsi  : ScalarCryptoByteSelect32;
+def int_riscv_aes32dsmi : ScalarCryptoByteSelect32;
+
+def int_riscv_aes64ds   : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64dsm  : ScalarCryptoGprGprIntrinsic64;
+
+def int_riscv_aes64im   : ScalarCryptoGprIntrinsic64;
+
+// Zkne
+def int_riscv_aes32esi  : ScalarCryptoByteSelect32;
+def int_riscv_aes32esmi : ScalarCryptoByteSelect32;
+
+def int_riscv_aes64es   : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64esm  : ScalarCryptoGprGprIntrinsic64;
+
+// Zknd & Zkne
+def int_riscv_aes64ks2  : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64ks1i : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
+                                    [IntrNoMem, IntrSpeculatable,
+                                     IntrWillReturn, ImmArg<ArgIndex<1>>]>;
+
+// Zknh
+def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsicAny;
+
+def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32;
+
+def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64;
+
+// Zksed
+def int_riscv_sm4ks      : ScalarCryptoByteSelectAny;
+def int_riscv_sm4ed      : ScalarCryptoByteSelectAny;
+
+// Zksh
+def int_riscv_sm3p0      : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sm3p1      : ScalarCryptoGprIntrinsicAny;
+} // TargetPrefix = "riscv"
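
Note the ImmArg<ArgIndex<2>> on the byte-select classes: the third
operand must be a compile-time constant in the range [0, 3]. A sketch of
a call, assuming riscv32 with -mattr=+zkne (the function name is
illustrative):

    declare i32 @llvm.riscv.aes32esi(i32, i32, i8)

    define i32 @aes32esi_bs1(i32 %a, i32 %b) nounwind {
      ; the byte-select operand is an ImmArg and must be an immediate 0-3
      %r = call i32 @llvm.riscv.aes32esi(i32 %a, i32 %b, i8 1)
      ret i32 %r
    }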

diff  --git a/llvm/lib/Target/RISCV/RISCV.td b/llvm/lib/Target/RISCV/RISCV.td
index 0462a26a0c55e..5b0f27c5e9370 100644
--- a/llvm/lib/Target/RISCV/RISCV.td
+++ b/llvm/lib/Target/RISCV/RISCV.td
@@ -169,6 +169,12 @@ def HasStdExtZbpOrZbkb
                                    "'Zbp' (Permutation 'Zb' Instructions) or "
                                    "'Zbkb' (Bitmanip instructions for Cryptography)">;
 
+def HasStdExtZbbOrZbkb
+    : Predicate<"Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbkb()">,
+                AssemblerPredicate<(any_of FeatureStdExtZbb, FeatureStdExtZbkb),
+                                   "'Zbb' (Basic Bit-Manipulation) or "
+                                   "'Zbkb' (Bitmanip instructions for Cryptography)">;
+
 def HasStdExtZbbOrZbpOrZbkb
     : Predicate<"Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp() || Subtarget->hasStdExtZbkb()">,
                 AssemblerPredicate<(any_of FeatureStdExtZbb, FeatureStdExtZbp, FeatureStdExtZbkb),

diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c71f1a698563d..205f71a6fe478 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -250,7 +250,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
 
-  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
+  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
+      Subtarget.hasStdExtZbkb()) {
     if (Subtarget.is64Bit()) {
       setOperationAction(ISD::ROTL, MVT::i32, Custom);
       setOperationAction(ISD::ROTR, MVT::i32, Custom);
@@ -278,7 +279,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
     // pattern match it directly in isel.
     setOperationAction(ISD::BSWAP, XLenVT,
-                       Subtarget.hasStdExtZbb() ? Legal : Expand);
+                       (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
+                           ? Legal
+                           : Expand);
   }
 
   if (Subtarget.hasStdExtZbb()) {
@@ -1232,7 +1235,8 @@ bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
   if (VT.isVector())
     return false;
 
-  return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) &&
+  return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
+          Subtarget.hasStdExtZbkb()) &&
          !isa<ConstantSDNode>(Y);
 }
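
The net effect of these hooks is that generic rotate and byte-swap IR
selects the Zbkb instructions even when Zbb and Zbp are absent. A sketch,
assuming -mtriple=riscv32 -mattr=+zbkb (mirroring the tests added below):

    declare i32 @llvm.fshl.i32(i32, i32, i32)
    declare i32 @llvm.bswap.i32(i32)

    define i32 @rotl_then_bswap(i32 %a, i32 %b) nounwind {
      %r = call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b) ; -> rol
      %s = call i32 @llvm.bswap.i32(i32 %r)                ; -> rev8
      ret i32 %s
    }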
 

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index 75f9ec98cc1aa..db3f5851879a7 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -794,16 +794,16 @@ def : InstAlias<"bext $rd, $rs1, $shamt",
 // Codegen patterns
 //===----------------------------------------------------------------------===//
 
-let Predicates = [HasStdExtZbbOrZbp] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
 def : Pat<(and GPR:$rs1, (not GPR:$rs2)), (ANDN GPR:$rs1, GPR:$rs2)>;
 def : Pat<(or  GPR:$rs1, (not GPR:$rs2)), (ORN  GPR:$rs1, GPR:$rs2)>;
 def : Pat<(xor GPR:$rs1, (not GPR:$rs2)), (XNOR GPR:$rs1, GPR:$rs2)>;
-} // Predicates = [HasStdExtZbbOrZbp]
+} // Predicates = [HasStdExtZbbOrZbpOrZbkb]
 
-let Predicates = [HasStdExtZbbOrZbp] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
 def : PatGprGpr<rotl, ROL>;
 def : PatGprGpr<rotr, ROR>;
-} // Predicates = [HasStdExtZbbOrZbp]
+} // Predicates = [HasStdExtZbbOrZbpOrZbkb]
 
 let Predicates = [HasStdExtZbs] in {
 def : Pat<(and (not (shiftop<shl> 1, GPR:$rs2)), GPR:$rs1),
@@ -854,7 +854,7 @@ def : Pat<(and GPR:$r, BCLRIANDIMask:$i),
 
 // There's no encoding for roli in the 'B' extension as it can be
 // implemented with rori by negating the immediate.
-let Predicates = [HasStdExtZbbOrZbp] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
 def : PatGprImm<rotr, RORI, uimmlog2xlen>;
 def : Pat<(rotl GPR:$rs1, uimmlog2xlen:$shamt),
           (RORI GPR:$rs1, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
@@ -960,35 +960,38 @@ def : PatGprGpr<umin, MINU>;
 def : PatGprGpr<umax, MAXU>;
 } // Predicates = [HasStdExtZbb]
 
-let Predicates = [HasStdExtZbb, IsRV32] in {
+let Predicates = [HasStdExtZbbOrZbkb, IsRV32] in {
 def : Pat<(i32 (bswap GPR:$rs1)), (REV8_RV32 GPR:$rs1)>;
-} // Predicates = [HasStdExtZbb, IsRV32]
+} // Predicates = [HasStdExtZbbOrZbkb, IsRV32]
 
-let Predicates = [HasStdExtZbb, IsRV64] in {
+let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
 def : Pat<(i64 (bswap GPR:$rs1)), (REV8_RV64 GPR:$rs1)>;
-} // Predicates = [HasStdExtZbb, IsRV64]
+} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
 
-let Predicates = [HasStdExtZbp, IsRV32] in {
+let Predicates = [HasStdExtZbpOrZbkb, IsRV32] in
 def : Pat<(i32 (or (and GPR:$rs1, 0x0000FFFF), (shl GPR:$rs2, (i32 16)))),
           (PACK GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbp, IsRV32] in
 def : Pat<(i32 (or (and GPR:$rs2, 0xFFFF0000), (srl GPR:$rs1, (i32 16)))),
           (PACKU GPR:$rs1, GPR:$rs2)>;
 
-}
-let Predicates = [HasStdExtZbp, IsRV64] in {
+let Predicates = [HasStdExtZbpOrZbkb, IsRV64] in
 def : Pat<(i64 (or (and GPR:$rs1, 0x00000000FFFFFFFF), (shl GPR:$rs2, (i64 32)))),
           (PACK GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbp, IsRV64] in
 def : Pat<(i64 (or (and GPR:$rs2, 0xFFFFFFFF00000000), (srl GPR:$rs1, (i64 32)))),
           (PACKU GPR:$rs1, GPR:$rs2)>;
-}
-let Predicates = [HasStdExtZbp] in {
+
+let Predicates = [HasStdExtZbpOrZbkb] in {
 def : Pat<(or (and (shl GPR:$rs2, (XLenVT 8)), 0xFFFF),
               (and GPR:$rs1, 0x00FF)),
           (PACKH GPR:$rs1, GPR:$rs2)>;
 def : Pat<(or (shl (and GPR:$rs2, 0x00FF), (XLenVT 8)),
               (and GPR:$rs1, 0x00FF)),
           (PACKH GPR:$rs1, GPR:$rs2)>;
-}
+} // Predicates = [HasStdExtZbpOrZbkb]
 
 let Predicates = [HasStdExtZbbOrZbp, IsRV32] in
 def : Pat<(i32 (and GPR:$rs, 0xFFFF)), (ZEXTH_RV32 GPR:$rs)>;
@@ -1091,13 +1094,13 @@ def : Pat<(i64 (add (and (shl GPR:$rs1, (i64 3)), 0x7FFFFFFFF), non_imm12:$rs2))
           (SH3ADDUW GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZba, IsRV64]
 
-let Predicates = [HasStdExtZbbOrZbp, IsRV64] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64] in {
 def : PatGprGpr<riscv_rolw, ROLW>;
 def : PatGprGpr<riscv_rorw, RORW>;
 def : PatGprImm<riscv_rorw, RORIW, uimm5>;
 def : Pat<(riscv_rolw GPR:$rs1, uimm5:$rs2),
           (RORIW GPR:$rs1, (ImmSubFrom32 uimm5:$rs2))>;
-} // Predicates = [HasStdExtZbbOrZbp, IsRV64]
+} // Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64]
 
 let Predicates = [HasStdExtZbp, IsRV64] in {
 def : Pat<(riscv_rorw (riscv_grevw GPR:$rs1, 24), 16), (GREVIW GPR:$rs1, 8)>;
@@ -1129,7 +1132,7 @@ def : PatGpr<riscv_ctzw, CTZW>;
 def : Pat<(i64 (ctpop (i64 (zexti32 (i64 GPR:$rs1))))), (CPOPW GPR:$rs1)>;
 } // Predicates = [HasStdExtZbb, IsRV64]
 
-let Predicates = [HasStdExtZbp, IsRV64] in {
+let Predicates = [HasStdExtZbpOrZbkb, IsRV64] in {
 def : Pat<(i64 (sext_inreg (or (shl GPR:$rs2, (i64 16)),
                                (and GPR:$rs1, 0x000000000000FFFF)),
                            i32)),
@@ -1137,16 +1140,21 @@ def : Pat<(i64 (sext_inreg (or (shl GPR:$rs2, (i64 16)),
 def : Pat<(i64 (or (sext_inreg (shl GPR:$rs2, (i64 16)), i32),
                    (and GPR:$rs1, 0x000000000000FFFF))),
           (PACKW GPR:$rs1, GPR:$rs2)>;
+}
+
+let Predicates = [HasStdExtZbp, IsRV64] in
 def : Pat<(i64 (or (and (assertsexti32 GPR:$rs2), 0xFFFFFFFFFFFF0000),
                    (srl (and GPR:$rs1, 0xFFFFFFFF), (i64 16)))),
           (PACKUW GPR:$rs1, GPR:$rs2)>;
-} // Predicates = [HasStdExtZbp, IsRV64]
 
-let Predicates = [HasStdExtZbc] in {
+
+let Predicates = [HasStdExtZbcOrZbkc] in {
 def : PatGprGpr<int_riscv_clmul, CLMUL>;
 def : PatGprGpr<int_riscv_clmulh, CLMULH>;
+} // Predicates = [HasStdExtZbcOrZbkc]
+
+let Predicates = [HasStdExtZbc] in
 def : PatGprGpr<int_riscv_clmulr, CLMULR>;
-} // Predicates = [HasStdExtZbc]
 
 let Predicates = [HasStdExtZbe] in {
 def : PatGprGpr<riscv_bcompress, BCOMPRESS>;
@@ -1177,3 +1185,17 @@ def : PatGprGpr<riscv_bfp, BFP>;
 
 let Predicates = [HasStdExtZbf, IsRV64] in
 def : PatGprGpr<riscv_bfpw, BFPW>;
+
+let Predicates = [HasStdExtZbkb] in {
+def : PatGpr<int_riscv_brev8, BREV8>;
+} // Predicates = [HasStdExtZbkb]
+
+let Predicates = [HasStdExtZbkb, IsRV32] in {
+def : PatGpr<int_riscv_zip, ZIP_RV32>;
+def : PatGpr<int_riscv_unzip, UNZIP_RV32>;
+} // Predicates = [HasStdExtZbkb, IsRV32]
+
+let Predicates = [HasStdExtZbkx] in {
+def : PatGprGpr<int_riscv_xperm4, XPERMN>;
+def : PatGprGpr<int_riscv_xperm8, XPERMB>;
+}
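
Since the PACKH patterns now also fire under Zbkb, plain IR like the
following should select packh with just -mattr=+zbkb (a sketch matching
the second PACKH pattern above; no intrinsic is involved):

    define i32 @packh_i32(i32 %a, i32 %b) nounwind {
      %1 = and i32 %a, 255
      %2 = and i32 %b, 255
      %3 = shl i32 %2, 8
      %4 = or i32 %3, %1 ; -> packh a0, a0, a1
      ret i32 %4
    }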

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
index 52a29526a541a..4a41cddedc715 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
@@ -21,7 +21,7 @@ def RnumArg : AsmOperandClass {
   let DiagnosticType = "InvalidRnumArg";
 }
 
-def rnum : Operand<XLenVT>, ImmLeaf<XLenVT, [{return (Imm >= 0 && Imm <= 10);}]> {
+def rnum : Operand<i32>, TImmLeaf<i32, [{return (Imm >= 0 && Imm <= 10);}]> {
   let ParserMatchClass = RnumArg;
   let EncoderMethod = "getImmOpValue";
   let DecoderMethod = "decodeUImmOperand<4>";
@@ -29,6 +29,13 @@ def rnum : Operand<XLenVT>, ImmLeaf<XLenVT, [{return (Imm >= 0 && Imm <= 10);}]>
   let OperandNamespace = "RISCVOp";
 }
 
+def byteselect : Operand<i8>, TImmLeaf<i8, [{return isUInt<2>(Imm);}]> {
+  let ParserMatchClass = UImmAsmOperand<2>;
+  let DecoderMethod = "decodeUImmOperand<2>";
+  let OperandType = "OPERAND_UIMM2";
+  let OperandNamespace = "RISCVOp";
+}
+
 //===----------------------------------------------------------------------===//
 // Instruction class templates
 //===----------------------------------------------------------------------===//
@@ -42,7 +49,7 @@ class RVKUnary<bits<12> imm12_in, bits<3> funct3, string opcodestr>
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
 class RVKByteSelect<bits<5> funct5, string opcodestr>
     : RVInstR<{0b00, funct5}, 0b000, OPC_OP, (outs GPR:$rd),
-              (ins GPR:$rs1, GPR:$rs2, uimm2:$bs),
+              (ins GPR:$rs1, GPR:$rs2, byteselect:$bs),
               opcodestr, "$rd, $rs1, $rs2, $bs">{
   bits<2> bs;
   let Inst{31-30} = bs;
@@ -121,3 +128,76 @@ let Predicates = [HasStdExtZksh] in {
 def SM3P0 : RVKUnary<0b000100001000, 0b001, "sm3p0">;
 def SM3P1 : RVKUnary<0b000100001001, 0b001, "sm3p1">;
 } // Predicates = [HasStdExtZksh]
+
+//===----------------------------------------------------------------------===//
+// Codegen patterns
+//===----------------------------------------------------------------------===//
+
+class PatGprGprByteSelect<SDPatternOperator OpNode, RVInst Inst>
+    : Pat<(OpNode GPR:$rs1, GPR:$rs2, i8:$imm),
+          (Inst GPR:$rs1, GPR:$rs2, byteselect:$imm)>;
+
+// Zknd
+let Predicates = [HasStdExtZknd, IsRV32] in {
+def : PatGprGprByteSelect<int_riscv_aes32dsi, AES32DSI>;
+def : PatGprGprByteSelect<int_riscv_aes32dsmi, AES32DSMI>;
+} // Predicates = [HasStdExtZknd, IsRV32]
+
+let Predicates = [HasStdExtZknd, IsRV64] in {
+def : PatGprGpr<int_riscv_aes64ds, AES64DS>;
+def : PatGprGpr<int_riscv_aes64dsm, AES64DSM>;
+def : PatGpr<int_riscv_aes64im, AES64IM>;
+} // Predicates = [HasStdExtZknd, IsRV64]
+
+let Predicates = [HasStdExtZkndOrZkne, IsRV64] in {
+def : PatGprGpr<int_riscv_aes64ks2, AES64KS2>;
+def : Pat<(int_riscv_aes64ks1i GPR:$rs1, i32:$rnum),
+          (AES64KS1I GPR:$rs1, rnum:$rnum)>;
+} // Predicates = [HasStdExtZkndOrZkne, IsRV64]
+
+// Zkne
+let Predicates = [HasStdExtZkne, IsRV32] in {
+def : PatGprGprByteSelect<int_riscv_aes32esi, AES32ESI>;
+def : PatGprGprByteSelect<int_riscv_aes32esmi, AES32ESMI>;
+} // Predicates = [HasStdExtZkne, IsRV32]
+
+let Predicates = [HasStdExtZkne, IsRV64] in {
+def : PatGprGpr<int_riscv_aes64es, AES64ES>;
+def : PatGprGpr<int_riscv_aes64esm, AES64ESM>;
+} // Predicates = [HasStdExtZkne, IsRV64]
+
+// Zknh
+let Predicates = [HasStdExtZknh] in {
+def : PatGpr<int_riscv_sha256sig0, SHA256SIG0>;
+def : PatGpr<int_riscv_sha256sig1, SHA256SIG1>;
+def : PatGpr<int_riscv_sha256sum0, SHA256SUM0>;
+def : PatGpr<int_riscv_sha256sum1, SHA256SUM1>;
+} // Predicates = [HasStdExtZknh]
+
+let Predicates = [HasStdExtZknh, IsRV32] in {
+def : PatGprGpr<int_riscv_sha512sig0l, SHA512SIG0L>;
+def : PatGprGpr<int_riscv_sha512sig0h, SHA512SIG0H>;
+def : PatGprGpr<int_riscv_sha512sig1l, SHA512SIG1L>;
+def : PatGprGpr<int_riscv_sha512sig1h, SHA512SIG1H>;
+def : PatGprGpr<int_riscv_sha512sum0r, SHA512SUM0R>;
+def : PatGprGpr<int_riscv_sha512sum1r, SHA512SUM1R>;
+} // Predicates = [HasStdExtZknh, IsRV32]
+
+let Predicates = [HasStdExtZknh, IsRV64] in {
+def : PatGpr<int_riscv_sha512sig0, SHA512SIG0>;
+def : PatGpr<int_riscv_sha512sig1, SHA512SIG1>;
+def : PatGpr<int_riscv_sha512sum0, SHA512SUM0>;
+def : PatGpr<int_riscv_sha512sum1, SHA512SUM1>;
+} // Predicates = [HasStdExtZknh, IsRV64]
+
+// Zksed
+let Predicates = [HasStdExtZksed] in {
+def : PatGprGprByteSelect<int_riscv_sm4ks, SM4KS>;
+def : PatGprGprByteSelect<int_riscv_sm4ed, SM4ED>;
+} // Predicates = [HasStdExtZksed]
+
+// Zksh
+let Predicates = [HasStdExtZksh] in {
+def : PatGpr<int_riscv_sm3p0, SM3P0>;
+def : PatGpr<int_riscv_sm3p1, SM3P1>;
+} // Predicates = [HasStdExtZksh]
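
Because rnum is now a TImmLeaf, the round-number operand of aes64ks1i
must be a constant in [0, 10]; a non-constant value will fail to select.
A sketch, assuming riscv64 with -mattr=+zkne:

    declare i64 @llvm.riscv.aes64ks1i(i64, i32)

    define i64 @aes64ks1i_round0(i64 %rs1) nounwind {
      ; rnum must be an immediate in the range [0, 10]
      %r = call i64 @llvm.riscv.aes64ks1i(i64 %rs1, i32 0)
      ret i64 %r
    }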

diff  --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbp-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbp-zbkb.ll
new file mode 100644
index 0000000000000..ecb784a5ff5c3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbp-zbkb.ll
@@ -0,0 +1,502 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV32I
+; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV32ZBB-ZBP-ZBKB
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbp -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV32ZBB-ZBP-ZBKB
+; RUN: llc -mtriple=riscv32 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV32ZBB-ZBP-ZBKB
+
+define i32 @andn_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: andn_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: andn_i32:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    andn a0, a0, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %neg = xor i32 %b, -1
+  %and = and i32 %neg, %a
+  ret i32 %and
+}
+
+define i64 @andn_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: andn_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    not a3, a3
+; RV32I-NEXT:    not a2, a2
+; RV32I-NEXT:    and a0, a2, a0
+; RV32I-NEXT:    and a1, a3, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: andn_i64:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    andn a0, a0, a2
+; RV32ZBB-ZBP-ZBKB-NEXT:    andn a1, a1, a3
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %neg = xor i64 %b, -1
+  %and = and i64 %neg, %a
+  ret i64 %and
+}
+
+define i32 @orn_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: orn_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: orn_i32:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    orn a0, a0, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %neg = xor i32 %b, -1
+  %or = or i32 %neg, %a
+  ret i32 %or
+}
+
+define i64 @orn_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: orn_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    not a3, a3
+; RV32I-NEXT:    not a2, a2
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a1, a3, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: orn_i64:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    orn a0, a0, a2
+; RV32ZBB-ZBP-ZBKB-NEXT:    orn a1, a1, a3
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %neg = xor i64 %b, -1
+  %or = or i64 %neg, %a
+  ret i64 %or
+}
+
+define i32 @xnor_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: xnor_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: xnor_i32:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    xnor a0, a0, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %neg = xor i32 %a, -1
+  %xor = xor i32 %neg, %b
+  ret i32 %xor
+}
+
+define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: xnor_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    xor a1, a1, a3
+; RV32I-NEXT:    xor a0, a0, a2
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: xnor_i64:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    xnor a0, a0, a2
+; RV32ZBB-ZBP-ZBKB-NEXT:    xnor a1, a1, a3
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %neg = xor i64 %a, -1
+  %xor = xor i64 %neg, %b
+  ret i64 %xor
+}
+
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+
+define i32 @rol_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: rol_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sll a2, a0, a1
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    srl a0, a0, a1
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: rol_i32:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    rol a0, a0, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %or = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
+  ret i32 %or
+}
+
+; This test is presented here in case future expansions of the Bitmanip
+; extensions introduce instructions suitable for this pattern.
+
+declare i64 @llvm.fshl.i64(i64, i64, i64)
+
+define i64 @rol_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: rol_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a3, a2, 5
+; RV32I-NEXT:    andi a3, a3, 1
+; RV32I-NEXT:    mv a4, a1
+; RV32I-NEXT:    bnez a3, .LBB7_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a4, a0
+; RV32I-NEXT:  .LBB7_2:
+; RV32I-NEXT:    sll a5, a4, a2
+; RV32I-NEXT:    bnez a3, .LBB7_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB7_4:
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    not a6, a2
+; RV32I-NEXT:    srl a1, a1, a6
+; RV32I-NEXT:    or a3, a5, a1
+; RV32I-NEXT:    sll a0, a0, a2
+; RV32I-NEXT:    srli a1, a4, 1
+; RV32I-NEXT:    srl a1, a1, a6
+; RV32I-NEXT:    or a1, a0, a1
+; RV32I-NEXT:    mv a0, a3
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: rol_i64:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    srli a3, a2, 5
+; RV32ZBB-ZBP-ZBKB-NEXT:    andi a3, a3, 1
+; RV32ZBB-ZBP-ZBKB-NEXT:    mv a4, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:    bnez a3, .LBB7_2
+; RV32ZBB-ZBP-ZBKB-NEXT:  # %bb.1:
+; RV32ZBB-ZBP-ZBKB-NEXT:    mv a4, a0
+; RV32ZBB-ZBP-ZBKB-NEXT:  .LBB7_2:
+; RV32ZBB-ZBP-ZBKB-NEXT:    sll a5, a4, a2
+; RV32ZBB-ZBP-ZBKB-NEXT:    bnez a3, .LBB7_4
+; RV32ZBB-ZBP-ZBKB-NEXT:  # %bb.3:
+; RV32ZBB-ZBP-ZBKB-NEXT:    mv a0, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:  .LBB7_4:
+; RV32ZBB-ZBP-ZBKB-NEXT:    srli a1, a0, 1
+; RV32ZBB-ZBP-ZBKB-NEXT:    not a6, a2
+; RV32ZBB-ZBP-ZBKB-NEXT:    srl a1, a1, a6
+; RV32ZBB-ZBP-ZBKB-NEXT:    or a3, a5, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:    sll a0, a0, a2
+; RV32ZBB-ZBP-ZBKB-NEXT:    srli a1, a4, 1
+; RV32ZBB-ZBP-ZBKB-NEXT:    srl a1, a1, a6
+; RV32ZBB-ZBP-ZBKB-NEXT:    or a1, a0, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:    mv a0, a3
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %or = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %b)
+  ret i64 %or
+}
+
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+
+define i32 @ror_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: ror_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srl a2, a0, a1
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: ror_i32:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    ror a0, a0, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %or = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
+  ret i32 %or
+}
+
+; This test is presented here in case future expansions of the Bitmanip
+; extensions introduce instructions suitable for this pattern.
+
+declare i64 @llvm.fshr.i64(i64, i64, i64)
+
+define i64 @ror_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: ror_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a4, a2, 32
+; RV32I-NEXT:    mv a3, a0
+; RV32I-NEXT:    beqz a4, .LBB9_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:  .LBB9_2:
+; RV32I-NEXT:    srl a5, a3, a2
+; RV32I-NEXT:    beqz a4, .LBB9_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:  .LBB9_4:
+; RV32I-NEXT:    slli a0, a1, 1
+; RV32I-NEXT:    not a4, a2
+; RV32I-NEXT:    sll a0, a0, a4
+; RV32I-NEXT:    or a0, a0, a5
+; RV32I-NEXT:    srl a1, a1, a2
+; RV32I-NEXT:    slli a2, a3, 1
+; RV32I-NEXT:    sll a2, a2, a4
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: ror_i64:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    andi a4, a2, 32
+; RV32ZBB-ZBP-ZBKB-NEXT:    mv a3, a0
+; RV32ZBB-ZBP-ZBKB-NEXT:    beqz a4, .LBB9_2
+; RV32ZBB-ZBP-ZBKB-NEXT:  # %bb.1:
+; RV32ZBB-ZBP-ZBKB-NEXT:    mv a3, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:  .LBB9_2:
+; RV32ZBB-ZBP-ZBKB-NEXT:    srl a5, a3, a2
+; RV32ZBB-ZBP-ZBKB-NEXT:    beqz a4, .LBB9_4
+; RV32ZBB-ZBP-ZBKB-NEXT:  # %bb.3:
+; RV32ZBB-ZBP-ZBKB-NEXT:    mv a1, a0
+; RV32ZBB-ZBP-ZBKB-NEXT:  .LBB9_4:
+; RV32ZBB-ZBP-ZBKB-NEXT:    slli a0, a1, 1
+; RV32ZBB-ZBP-ZBKB-NEXT:    not a4, a2
+; RV32ZBB-ZBP-ZBKB-NEXT:    sll a0, a0, a4
+; RV32ZBB-ZBP-ZBKB-NEXT:    or a0, a0, a5
+; RV32ZBB-ZBP-ZBKB-NEXT:    srl a1, a1, a2
+; RV32ZBB-ZBP-ZBKB-NEXT:    slli a2, a3, 1
+; RV32ZBB-ZBP-ZBKB-NEXT:    sll a2, a2, a4
+; RV32ZBB-ZBP-ZBKB-NEXT:    or a1, a2, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %or = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %b)
+  ret i64 %or
+}
+
+define i32 @rori_i32_fshl(i32 %a) nounwind {
+; RV32I-LABEL: rori_i32_fshl:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    slli a0, a0, 31
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: rori_i32_fshl:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    rori a0, a0, 1
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31)
+  ret i32 %1
+}
+
+define i32 @rori_i32_fshr(i32 %a) nounwind {
+; RV32I-LABEL: rori_i32_fshr:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 1
+; RV32I-NEXT:    srli a0, a0, 31
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: rori_i32_fshr:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    rori a0, a0, 31
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31)
+  ret i32 %1
+}
+
+define i64 @rori_i64(i64 %a) nounwind {
+; RV32I-LABEL: rori_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    slli a3, a1, 31
+; RV32I-NEXT:    or a2, a3, a2
+; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    slli a0, a0, 31
+; RV32I-NEXT:    or a1, a0, a1
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: rori_i64:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    srli a2, a0, 1
+; RV32ZBB-ZBP-ZBKB-NEXT:    slli a3, a1, 31
+; RV32ZBB-ZBP-ZBKB-NEXT:    or a2, a3, a2
+; RV32ZBB-ZBP-ZBKB-NEXT:    srli a1, a1, 1
+; RV32ZBB-ZBP-ZBKB-NEXT:    slli a0, a0, 31
+; RV32ZBB-ZBP-ZBKB-NEXT:    or a1, a0, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:    mv a0, a2
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 63)
+  ret i64 %1
+}
+
+define i64 @rori_i64_fshr(i64 %a) nounwind {
+; RV32I-LABEL: rori_i64_fshr:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a1, 31
+; RV32I-NEXT:    slli a3, a0, 1
+; RV32I-NEXT:    or a2, a3, a2
+; RV32I-NEXT:    srli a0, a0, 31
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    or a1, a1, a0
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: rori_i64_fshr:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    srli a2, a1, 31
+; RV32ZBB-ZBP-ZBKB-NEXT:    slli a3, a0, 1
+; RV32ZBB-ZBP-ZBKB-NEXT:    or a2, a3, a2
+; RV32ZBB-ZBP-ZBKB-NEXT:    srli a0, a0, 31
+; RV32ZBB-ZBP-ZBKB-NEXT:    slli a1, a1, 1
+; RV32ZBB-ZBP-ZBKB-NEXT:    or a1, a1, a0
+; RV32ZBB-ZBP-ZBKB-NEXT:    mv a0, a2
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 63)
+  ret i64 %1
+}
+
+define i8 @srli_i8(i8 %a) nounwind {
+; RV32I-LABEL: srli_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srli a0, a0, 30
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: srli_i8:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    slli a0, a0, 24
+; RV32ZBB-ZBP-ZBKB-NEXT:    srli a0, a0, 30
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %1 = lshr i8 %a, 6
+  ret i8 %1
+}
+
+; We could use sext.b+srai, but slli+srai offers more opportunities for
+; compressed instructions.
+define i8 @srai_i8(i8 %a) nounwind {
+; RV32I-LABEL: srai_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 29
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: srai_i8:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    slli a0, a0, 24
+; RV32ZBB-ZBP-ZBKB-NEXT:    srai a0, a0, 29
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %1 = ashr i8 %a, 5
+  ret i8 %1
+}
+
+; We could use zext.h+srli, but slli+srli offers more opportunities for
+; compressed instructions.
+define i16 @srli_i16(i16 %a) nounwind {
+; RV32I-LABEL: srli_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 22
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: srli_i16:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    slli a0, a0, 16
+; RV32ZBB-ZBP-ZBKB-NEXT:    srli a0, a0, 22
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %1 = lshr i16 %a, 6
+  ret i16 %1
+}
+
+; We could use sext.h+srai, but slli+srai offers more opportunities for
+; compressed instructions.
+define i16 @srai_i16(i16 %a) nounwind {
+; RV32I-LABEL: srai_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 25
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: srai_i16:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    slli a0, a0, 16
+; RV32ZBB-ZBP-ZBKB-NEXT:    srai a0, a0, 25
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %1 = ashr i16 %a, 9
+  ret i16 %1
+}
+
+define i1 @andn_seqz_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: andn_seqz_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: andn_seqz_i32:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    andn a0, a1, a0
+; RV32ZBB-ZBP-ZBKB-NEXT:    seqz a0, a0
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %and = and i32 %a, %b
+  %cmpeq = icmp eq i32 %and, %b
+  ret i1 %cmpeq
+}
+
+define i1 @andn_seqz_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: andn_seqz_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: andn_seqz_i64:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    andn a1, a3, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:    andn a0, a2, a0
+; RV32ZBB-ZBP-ZBKB-NEXT:    or a0, a0, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:    seqz a0, a0
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %and = and i64 %a, %b
+  %cmpeq = icmp eq i64 %and, %b
+  ret i1 %cmpeq
+}
+
+define i1 @andn_snez_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: andn_snez_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: andn_snez_i32:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    andn a0, a1, a0
+; RV32ZBB-ZBP-ZBKB-NEXT:    snez a0, a0
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %and = and i32 %a, %b
+  %cmpeq = icmp ne i32 %and, %b
+  ret i1 %cmpeq
+}
+
+define i1 @andn_snez_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: andn_snez_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBP-ZBKB-LABEL: andn_snez_i64:
+; RV32ZBB-ZBP-ZBKB:       # %bb.0:
+; RV32ZBB-ZBP-ZBKB-NEXT:    andn a1, a3, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:    andn a0, a2, a0
+; RV32ZBB-ZBP-ZBKB-NEXT:    or a0, a0, a1
+; RV32ZBB-ZBP-ZBKB-NEXT:    snez a0, a0
+; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+  %and = and i64 %a, %b
+  %cmpeq = icmp ne i64 %and, %b
+  ret i1 %cmpeq
+}

diff  --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbp.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbp.ll
deleted file mode 100644
index ac75dfd0773b3..0000000000000
--- a/llvm/test/CodeGen/RISCV/rv32zbb-zbp.ll
+++ /dev/null
@@ -1,675 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV32I
-; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV32ZBB
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbp -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV32ZBP
-
-define i32 @andn_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: andn_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    not a1, a1
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: andn_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    andn a0, a0, a1
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: andn_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    andn a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %neg = xor i32 %b, -1
-  %and = and i32 %neg, %a
-  ret i32 %and
-}
-
-define i64 @andn_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: andn_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    not a3, a3
-; RV32I-NEXT:    not a2, a2
-; RV32I-NEXT:    and a0, a2, a0
-; RV32I-NEXT:    and a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: andn_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    andn a0, a0, a2
-; RV32ZBB-NEXT:    andn a1, a1, a3
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: andn_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    andn a0, a0, a2
-; RV32ZBP-NEXT:    andn a1, a1, a3
-; RV32ZBP-NEXT:    ret
-  %neg = xor i64 %b, -1
-  %and = and i64 %neg, %a
-  ret i64 %and
-}
-
-define i32 @orn_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: orn_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    not a1, a1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: orn_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    orn a0, a0, a1
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: orn_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orn a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %neg = xor i32 %b, -1
-  %or = or i32 %neg, %a
-  ret i32 %or
-}
-
-define i64 @orn_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: orn_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    not a3, a3
-; RV32I-NEXT:    not a2, a2
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: orn_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    orn a0, a0, a2
-; RV32ZBB-NEXT:    orn a1, a1, a3
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: orn_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orn a0, a0, a2
-; RV32ZBP-NEXT:    orn a1, a1, a3
-; RV32ZBP-NEXT:    ret
-  %neg = xor i64 %b, -1
-  %or = or i64 %neg, %a
-  ret i64 %or
-}
-
-define i32 @xnor_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: xnor_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: xnor_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    xnor a0, a0, a1
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: xnor_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    xnor a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %neg = xor i32 %a, -1
-  %xor = xor i32 %neg, %b
-  ret i32 %xor
-}
-
-define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: xnor_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    xor a1, a1, a3
-; RV32I-NEXT:    xor a0, a0, a2
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    not a1, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: xnor_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    xnor a0, a0, a2
-; RV32ZBB-NEXT:    xnor a1, a1, a3
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: xnor_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    xnor a0, a0, a2
-; RV32ZBP-NEXT:    xnor a1, a1, a3
-; RV32ZBP-NEXT:    ret
-  %neg = xor i64 %a, -1
-  %xor = xor i64 %neg, %b
-  ret i64 %xor
-}
-
-declare i32 @llvm.fshl.i32(i32, i32, i32)
-
-define i32 @rol_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: rol_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    sll a2, a0, a1
-; RV32I-NEXT:    neg a1, a1
-; RV32I-NEXT:    srl a0, a0, a1
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: rol_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    rol a0, a0, a1
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: rol_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rol a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %or = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
-  ret i32 %or
-}
-
-; This test is presented here in case future expansions of the Bitmanip
-; extensions introduce instructions suitable for this pattern.
-
-declare i64 @llvm.fshl.i64(i64, i64, i64)
-
-define i64 @rol_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: rol_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a3, a2, 5
-; RV32I-NEXT:    andi a3, a3, 1
-; RV32I-NEXT:    mv a4, a1
-; RV32I-NEXT:    bnez a3, .LBB7_2
-; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    mv a4, a0
-; RV32I-NEXT:  .LBB7_2:
-; RV32I-NEXT:    sll a5, a4, a2
-; RV32I-NEXT:    bnez a3, .LBB7_4
-; RV32I-NEXT:  # %bb.3:
-; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:  .LBB7_4:
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    not a6, a2
-; RV32I-NEXT:    srl a1, a1, a6
-; RV32I-NEXT:    or a3, a5, a1
-; RV32I-NEXT:    sll a0, a0, a2
-; RV32I-NEXT:    srli a1, a4, 1
-; RV32I-NEXT:    srl a1, a1, a6
-; RV32I-NEXT:    or a1, a0, a1
-; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: rol_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    srli a3, a2, 5
-; RV32ZBB-NEXT:    andi a3, a3, 1
-; RV32ZBB-NEXT:    mv a4, a1
-; RV32ZBB-NEXT:    bnez a3, .LBB7_2
-; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    mv a4, a0
-; RV32ZBB-NEXT:  .LBB7_2:
-; RV32ZBB-NEXT:    sll a5, a4, a2
-; RV32ZBB-NEXT:    bnez a3, .LBB7_4
-; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    mv a0, a1
-; RV32ZBB-NEXT:  .LBB7_4:
-; RV32ZBB-NEXT:    srli a1, a0, 1
-; RV32ZBB-NEXT:    not a6, a2
-; RV32ZBB-NEXT:    srl a1, a1, a6
-; RV32ZBB-NEXT:    or a3, a5, a1
-; RV32ZBB-NEXT:    sll a0, a0, a2
-; RV32ZBB-NEXT:    srli a1, a4, 1
-; RV32ZBB-NEXT:    srl a1, a1, a6
-; RV32ZBB-NEXT:    or a1, a0, a1
-; RV32ZBB-NEXT:    mv a0, a3
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: rol_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    srli a3, a2, 5
-; RV32ZBP-NEXT:    andi a3, a3, 1
-; RV32ZBP-NEXT:    mv a4, a1
-; RV32ZBP-NEXT:    bnez a3, .LBB7_2
-; RV32ZBP-NEXT:  # %bb.1:
-; RV32ZBP-NEXT:    mv a4, a0
-; RV32ZBP-NEXT:  .LBB7_2:
-; RV32ZBP-NEXT:    sll a5, a4, a2
-; RV32ZBP-NEXT:    bnez a3, .LBB7_4
-; RV32ZBP-NEXT:  # %bb.3:
-; RV32ZBP-NEXT:    mv a0, a1
-; RV32ZBP-NEXT:  .LBB7_4:
-; RV32ZBP-NEXT:    srli a1, a0, 1
-; RV32ZBP-NEXT:    not a6, a2
-; RV32ZBP-NEXT:    srl a1, a1, a6
-; RV32ZBP-NEXT:    or a3, a5, a1
-; RV32ZBP-NEXT:    sll a0, a0, a2
-; RV32ZBP-NEXT:    srli a1, a4, 1
-; RV32ZBP-NEXT:    srl a1, a1, a6
-; RV32ZBP-NEXT:    or a1, a0, a1
-; RV32ZBP-NEXT:    mv a0, a3
-; RV32ZBP-NEXT:    ret
-  %or = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %b)
-  ret i64 %or
-}
-
-declare i32 @llvm.fshr.i32(i32, i32, i32)
-
-define i32 @ror_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: ror_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srl a2, a0, a1
-; RV32I-NEXT:    neg a1, a1
-; RV32I-NEXT:    sll a0, a0, a1
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: ror_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    ror a0, a0, a1
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: ror_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    ror a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %or = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
-  ret i32 %or
-}
-
-; This test is presented here in case future expansions of the Bitmanip
-; extensions introduce instructions suitable for this pattern.
-
-declare i64 @llvm.fshr.i64(i64, i64, i64)
-
-define i64 @ror_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: ror_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    andi a4, a2, 32
-; RV32I-NEXT:    mv a3, a0
-; RV32I-NEXT:    beqz a4, .LBB9_2
-; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    mv a3, a1
-; RV32I-NEXT:  .LBB9_2:
-; RV32I-NEXT:    srl a5, a3, a2
-; RV32I-NEXT:    beqz a4, .LBB9_4
-; RV32I-NEXT:  # %bb.3:
-; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:  .LBB9_4:
-; RV32I-NEXT:    slli a0, a1, 1
-; RV32I-NEXT:    not a4, a2
-; RV32I-NEXT:    sll a0, a0, a4
-; RV32I-NEXT:    or a0, a0, a5
-; RV32I-NEXT:    srl a1, a1, a2
-; RV32I-NEXT:    slli a2, a3, 1
-; RV32I-NEXT:    sll a2, a2, a4
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: ror_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    andi a4, a2, 32
-; RV32ZBB-NEXT:    mv a3, a0
-; RV32ZBB-NEXT:    beqz a4, .LBB9_2
-; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    mv a3, a1
-; RV32ZBB-NEXT:  .LBB9_2:
-; RV32ZBB-NEXT:    srl a5, a3, a2
-; RV32ZBB-NEXT:    beqz a4, .LBB9_4
-; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    mv a1, a0
-; RV32ZBB-NEXT:  .LBB9_4:
-; RV32ZBB-NEXT:    slli a0, a1, 1
-; RV32ZBB-NEXT:    not a4, a2
-; RV32ZBB-NEXT:    sll a0, a0, a4
-; RV32ZBB-NEXT:    or a0, a0, a5
-; RV32ZBB-NEXT:    srl a1, a1, a2
-; RV32ZBB-NEXT:    slli a2, a3, 1
-; RV32ZBB-NEXT:    sll a2, a2, a4
-; RV32ZBB-NEXT:    or a1, a2, a1
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: ror_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    andi a4, a2, 32
-; RV32ZBP-NEXT:    mv a3, a0
-; RV32ZBP-NEXT:    beqz a4, .LBB9_2
-; RV32ZBP-NEXT:  # %bb.1:
-; RV32ZBP-NEXT:    mv a3, a1
-; RV32ZBP-NEXT:  .LBB9_2:
-; RV32ZBP-NEXT:    srl a5, a3, a2
-; RV32ZBP-NEXT:    beqz a4, .LBB9_4
-; RV32ZBP-NEXT:  # %bb.3:
-; RV32ZBP-NEXT:    mv a1, a0
-; RV32ZBP-NEXT:  .LBB9_4:
-; RV32ZBP-NEXT:    slli a0, a1, 1
-; RV32ZBP-NEXT:    not a4, a2
-; RV32ZBP-NEXT:    sll a0, a0, a4
-; RV32ZBP-NEXT:    or a0, a0, a5
-; RV32ZBP-NEXT:    srl a1, a1, a2
-; RV32ZBP-NEXT:    slli a2, a3, 1
-; RV32ZBP-NEXT:    sll a2, a2, a4
-; RV32ZBP-NEXT:    or a1, a2, a1
-; RV32ZBP-NEXT:    ret
-  %or = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %b)
-  ret i64 %or
-}
-
-define i32 @rori_i32_fshl(i32 %a) nounwind {
-; RV32I-LABEL: rori_i32_fshl:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    slli a0, a0, 31
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: rori_i32_fshl:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    rori a0, a0, 1
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: rori_i32_fshl:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rori a0, a0, 1
-; RV32ZBP-NEXT:    ret
-  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31)
-  ret i32 %1
-}
-
-define i32 @rori_i32_fshr(i32 %a) nounwind {
-; RV32I-LABEL: rori_i32_fshr:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    srli a0, a0, 31
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: rori_i32_fshr:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    rori a0, a0, 31
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: rori_i32_fshr:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rori a0, a0, 31
-; RV32ZBP-NEXT:    ret
-  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31)
-  ret i32 %1
-}
-
-define i64 @rori_i64(i64 %a) nounwind {
-; RV32I-LABEL: rori_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a2, a0, 1
-; RV32I-NEXT:    slli a3, a1, 31
-; RV32I-NEXT:    or a2, a3, a2
-; RV32I-NEXT:    srli a1, a1, 1
-; RV32I-NEXT:    slli a0, a0, 31
-; RV32I-NEXT:    or a1, a0, a1
-; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: rori_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    srli a2, a0, 1
-; RV32ZBB-NEXT:    slli a3, a1, 31
-; RV32ZBB-NEXT:    or a2, a3, a2
-; RV32ZBB-NEXT:    srli a1, a1, 1
-; RV32ZBB-NEXT:    slli a0, a0, 31
-; RV32ZBB-NEXT:    or a1, a0, a1
-; RV32ZBB-NEXT:    mv a0, a2
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: rori_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    srli a2, a0, 1
-; RV32ZBP-NEXT:    slli a3, a1, 31
-; RV32ZBP-NEXT:    or a2, a3, a2
-; RV32ZBP-NEXT:    srli a1, a1, 1
-; RV32ZBP-NEXT:    slli a0, a0, 31
-; RV32ZBP-NEXT:    or a1, a0, a1
-; RV32ZBP-NEXT:    mv a0, a2
-; RV32ZBP-NEXT:    ret
-  %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 63)
-  ret i64 %1
-}
-
-define i64 @rori_i64_fshr(i64 %a) nounwind {
-; RV32I-LABEL: rori_i64_fshr:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a2, a1, 31
-; RV32I-NEXT:    slli a3, a0, 1
-; RV32I-NEXT:    or a2, a3, a2
-; RV32I-NEXT:    srli a0, a0, 31
-; RV32I-NEXT:    slli a1, a1, 1
-; RV32I-NEXT:    or a1, a1, a0
-; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: rori_i64_fshr:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    srli a2, a1, 31
-; RV32ZBB-NEXT:    slli a3, a0, 1
-; RV32ZBB-NEXT:    or a2, a3, a2
-; RV32ZBB-NEXT:    srli a0, a0, 31
-; RV32ZBB-NEXT:    slli a1, a1, 1
-; RV32ZBB-NEXT:    or a1, a1, a0
-; RV32ZBB-NEXT:    mv a0, a2
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: rori_i64_fshr:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    srli a2, a1, 31
-; RV32ZBP-NEXT:    slli a3, a0, 1
-; RV32ZBP-NEXT:    or a2, a3, a2
-; RV32ZBP-NEXT:    srli a0, a0, 31
-; RV32ZBP-NEXT:    slli a1, a1, 1
-; RV32ZBP-NEXT:    or a1, a1, a0
-; RV32ZBP-NEXT:    mv a0, a2
-; RV32ZBP-NEXT:    ret
-  %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 63)
-  ret i64 %1
-}
-
-define i8 @srli_i8(i8 %a) nounwind {
-; RV32I-LABEL: srli_i8:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    srli a0, a0, 30
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: srli_i8:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    slli a0, a0, 24
-; RV32ZBB-NEXT:    srli a0, a0, 30
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: srli_i8:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    slli a0, a0, 24
-; RV32ZBP-NEXT:    srli a0, a0, 30
-; RV32ZBP-NEXT:    ret
-  %1 = lshr i8 %a, 6
-  ret i8 %1
-}
-
-; We could use sext.b+srai, but slli+srai offers more opportunities for
-; compressed instructions.
-define i8 @srai_i8(i8 %a) nounwind {
-; RV32I-LABEL: srai_i8:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    srai a0, a0, 29
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: srai_i8:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    slli a0, a0, 24
-; RV32ZBB-NEXT:    srai a0, a0, 29
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: srai_i8:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    slli a0, a0, 24
-; RV32ZBP-NEXT:    srai a0, a0, 29
-; RV32ZBP-NEXT:    ret
-  %1 = ashr i8 %a, 5
-  ret i8 %1
-}
-
-; We could use zext.h+srli, but slli+srli offers more opportunities for
-; compressed instructions.
-define i16 @srli_i16(i16 %a) nounwind {
-; RV32I-LABEL: srli_i16:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a0, a0, 16
-; RV32I-NEXT:    srli a0, a0, 22
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: srli_i16:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    slli a0, a0, 16
-; RV32ZBB-NEXT:    srli a0, a0, 22
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: srli_i16:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    slli a0, a0, 16
-; RV32ZBP-NEXT:    srli a0, a0, 22
-; RV32ZBP-NEXT:    ret
-  %1 = lshr i16 %a, 6
-  ret i16 %1
-}
-
-; We could use sext.h+srai, but slli+srai offers more opportunities for
-; compressed instructions.
-define i16 @srai_i16(i16 %a) nounwind {
-; RV32I-LABEL: srai_i16:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a0, a0, 16
-; RV32I-NEXT:    srai a0, a0, 25
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: srai_i16:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    slli a0, a0, 16
-; RV32ZBB-NEXT:    srai a0, a0, 25
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: srai_i16:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    slli a0, a0, 16
-; RV32ZBP-NEXT:    srai a0, a0, 25
-; RV32ZBP-NEXT:    ret
-  %1 = ashr i16 %a, 9
-  ret i16 %1
-}
-
-define i1 @andn_seqz_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: andn_seqz_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    seqz a0, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: andn_seqz_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    andn a0, a1, a0
-; RV32ZBB-NEXT:    seqz a0, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: andn_seqz_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    andn a0, a1, a0
-; RV32ZBP-NEXT:    seqz a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = and i32 %a, %b
-  %cmpeq = icmp eq i32 %and, %b
-  ret i1 %cmpeq
-}
-
-define i1 @andn_seqz_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: andn_seqz_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    not a1, a1
-; RV32I-NEXT:    and a1, a1, a3
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    seqz a0, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: andn_seqz_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    andn a1, a3, a1
-; RV32ZBB-NEXT:    andn a0, a2, a0
-; RV32ZBB-NEXT:    or a0, a0, a1
-; RV32ZBB-NEXT:    seqz a0, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: andn_seqz_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    andn a1, a3, a1
-; RV32ZBP-NEXT:    andn a0, a2, a0
-; RV32ZBP-NEXT:    or a0, a0, a1
-; RV32ZBP-NEXT:    seqz a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = and i64 %a, %b
-  %cmpeq = icmp eq i64 %and, %b
-  ret i1 %cmpeq
-}
-
-define i1 @andn_snez_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: andn_snez_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    snez a0, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: andn_snez_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    andn a0, a1, a0
-; RV32ZBB-NEXT:    snez a0, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: andn_snez_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    andn a0, a1, a0
-; RV32ZBP-NEXT:    snez a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = and i32 %a, %b
-  %cmpeq = icmp ne i32 %and, %b
-  ret i1 %cmpeq
-}
-
-define i1 @andn_snez_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: andn_snez_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    not a1, a1
-; RV32I-NEXT:    and a1, a1, a3
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    snez a0, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: andn_snez_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    andn a1, a3, a1
-; RV32ZBB-NEXT:    andn a0, a2, a0
-; RV32ZBB-NEXT:    or a0, a0, a1
-; RV32ZBB-NEXT:    snez a0, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV32ZBP-LABEL: andn_snez_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    andn a1, a3, a1
-; RV32ZBP-NEXT:    andn a0, a2, a0
-; RV32ZBP-NEXT:    or a0, a0, a1
-; RV32ZBP-NEXT:    snez a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = and i64 %a, %b
-  %cmpeq = icmp ne i64 %and, %b
-  ret i1 %cmpeq
-}

diff  --git a/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll
index 870ac34153ad2..6870ae57d0f9f 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll
@@ -2,28 +2,6 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+zbc -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32ZBC
 
-declare i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
-
-define i32 @clmul32(i32 %a, i32 %b) nounwind {
-; RV32ZBC-LABEL: clmul32:
-; RV32ZBC:       # %bb.0:
-; RV32ZBC-NEXT:    clmul a0, a0, a1
-; RV32ZBC-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
- ret i32 %tmp
-}
-
-declare i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
-
-define i32 @clmul32h(i32 %a, i32 %b) nounwind {
-; RV32ZBC-LABEL: clmul32h:
-; RV32ZBC:       # %bb.0:
-; RV32ZBC-NEXT:    clmulh a0, a0, a1
-; RV32ZBC-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
- ret i32 %tmp
-}
-
 declare i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b)
 
 define i32 @clmul32r(i32 %a, i32 %b) nounwind {

diff  --git a/llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll
new file mode 100644
index 0000000000000..823e14304aee2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zbc -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBC-ZBKC
+; RUN: llc -mtriple=riscv32 -mattr=+zbkc -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBC-ZBKC
+
+declare i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
+
+define i32 @clmul32(i32 %a, i32 %b) nounwind {
+; RV32ZBC-ZBKC-LABEL: clmul32:
+; RV32ZBC-ZBKC:       # %bb.0:
+; RV32ZBC-ZBKC-NEXT:    clmul a0, a0, a1
+; RV32ZBC-ZBKC-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
+
+define i32 @clmul32h(i32 %a, i32 %b) nounwind {
+; RV32ZBC-ZBKC-LABEL: clmul32h:
+; RV32ZBC-ZBKC:       # %bb.0:
+; RV32ZBC-ZBKC-NEXT:    clmulh a0, a0, a1
+; RV32ZBC-ZBKC-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
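
For reference, Zbkc pairs clmul and clmulh so the full-width carry-less product can be assembled from the two intrinsics. The helper below is an illustrative sketch only, not part of this patch; it assumes nothing beyond the intrinsic signatures declared in the test above, and the function name is ours.

  declare i32 @llvm.riscv.clmul.i32(i32, i32)
  declare i32 @llvm.riscv.clmulh.i32(i32, i32)

  ; Assemble the full 64-bit carry-less product of two i32 inputs from
  ; the low half (clmul) and the high half (clmulh).
  define i64 @clmul64_from_i32(i32 %a, i32 %b) nounwind {
    %lo = call i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
    %hi = call i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
    %lo64 = zext i32 %lo to i64
    %hi64 = zext i32 %hi to i64
    %hishift = shl i64 %hi64, 32
    %prod = or i64 %hishift, %lo64
    ret i64 %prod
  }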

diff --git a/llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll
new file mode 100644
index 0000000000000..9efd7e5feb3f0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBKB
+
+declare i32 @llvm.riscv.brev8(i32);
+
+define i32 @brev8(i32 %a) nounwind {
+; RV32ZBKB-LABEL: brev8:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    ret
+  %val = call i32 @llvm.riscv.brev8(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.bswap.i32(i32)
+
+define i32 @rev8_i32(i32 %a) nounwind {
+; RV32ZBKB-LABEL: rev8_i32:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    ret
+  %1 = tail call i32 @llvm.bswap.i32(i32 %a)
+  ret i32 %1
+}
+
+declare i32 @llvm.riscv.zip(i32);
+
+define i32 @zip(i32 %a) nounwind {
+; RV32ZBKB-LABEL: zip:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    zip a0, a0
+; RV32ZBKB-NEXT:    ret
+  %val = call i32 @llvm.riscv.zip(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.unzip(i32);
+
+define i32 @unzip(i32 %a) nounwind {
+; RV32ZBKB-LABEL: unzip:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    unzip a0, a0
+; RV32ZBKB-NEXT:    ret
+  %val = call i32 @llvm.riscv.unzip(i32 %a)
+  ret i32 %val
+}
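
zip interleaves the bits of the register's low and high halves and unzip is its inverse permutation, so a round trip should return the original value. A minimal sketch of that relationship, assuming the inverse property from the Zbkb spec; the helper name is ours, and this patch does not assert that the pair folds away.

  declare i32 @llvm.riscv.zip(i32)
  declare i32 @llvm.riscv.unzip(i32)

  ; Round trip: unzip undoes the bit interleave performed by zip.
  define i32 @zip_roundtrip(i32 %a) nounwind {
    %z = call i32 @llvm.riscv.zip(i32 %a)
    %u = call i32 @llvm.riscv.unzip(i32 %z)
    ret i32 %u
  }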

diff --git a/llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll
new file mode 100644
index 0000000000000..eeb2997fe850a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=riscv32 -mattr=+zbkx -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBKX
+
+declare i32 @llvm.riscv.xperm8.i32(i32 %a, i32 %b)
+
+define i32 @xperm8(i32 %a, i32 %b) nounwind {
+; RV32ZBKX-LABEL: xperm8:
+; RV32ZBKX:       # %bb.0:
+; RV32ZBKX-NEXT:    xperm8 a0, a0, a1
+; RV32ZBKX-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.xperm8.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.xperm4.i32(i32 %a, i32 %b)
+
+define i32 @xperm4(i32 %a, i32 %b) nounwind {
+; RV32ZBKX-LABEL: xperm4:
+; RV32ZBKX:       # %bb.0:
+; RV32ZBKX-NEXT:    xperm4 a0, a0, a1
+; RV32ZBKX-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.xperm4.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
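
xperm8 treats rs1 as a table of XLEN/8 bytes and each byte of rs2 as an index into that table, with out-of-range indices producing zero. Under that reading of the Zbkx spec, the index word 0x00010203 performs a byte swap on RV32; the sketch below is illustrative only, and the helper name is ours.

  declare i32 @llvm.riscv.xperm8.i32(i32, i32)

  ; Select bytes 3, 2, 1, 0 of %a in that order, i.e. a byte swap on RV32.
  define i32 @bswap_via_xperm8(i32 %a) nounwind {
    %r = call i32 @llvm.riscv.xperm8.i32(i32 %a, i32 66051) ; 66051 = 0x00010203
    ret i32 %r
  }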

diff --git a/llvm/test/CodeGen/RISCV/rv32zbp-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbp-zbkb.ll
new file mode 100644
index 0000000000000..31e8c8909ccca
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zbp-zbkb.ll
@@ -0,0 +1,149 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbp -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBP-ZBKB
+; RUN: llc -mtriple=riscv32 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBP-ZBKB
+
+define i32 @pack_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: pack_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+  %shl = and i32 %a, 65535
+  %shl1 = shl i32 %b, 16
+  %or = or i32 %shl1, %shl
+  ret i32 %or
+}
+
+; Because we do not match i64 code patterns directly on RV32, some i64
+; patterns do not yet map to any bit-manipulation instruction on RV32.
+; This test is kept here in case future expansions of the Bitmanip
+; extensions introduce an instruction suitable for this pattern.
+
+define i64 @pack_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: pack_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    mv a1, a2
+; RV32I-NEXT:    ret
+;
+; RV32ZBP-ZBKB-LABEL: pack_i64:
+; RV32ZBP-ZBKB:       # %bb.0:
+; RV32ZBP-ZBKB-NEXT:    mv a1, a2
+; RV32ZBP-ZBKB-NEXT:    ret
+  %shl = and i64 %a, 4294967295
+  %shl1 = shl i64 %b, 32
+  %or = or i64 %shl1, %shl
+  ret i64 %or
+}
+
+; Because we do not match i64 code patterns directly on RV32, some i64
+; patterns do not yet map to any bit-manipulation instruction on RV32.
+; This test is kept here in case future expansions of the Bitmanip
+; extensions introduce an instruction suitable for this pattern.
+
+define i64 @packu_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: packu_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:    mv a1, a3
+; RV32I-NEXT:    ret
+;
+; RV32ZBP-ZBKB-LABEL: packu_i64:
+; RV32ZBP-ZBKB:       # %bb.0:
+; RV32ZBP-ZBKB-NEXT:    mv a0, a1
+; RV32ZBP-ZBKB-NEXT:    mv a1, a3
+; RV32ZBP-ZBKB-NEXT:    ret
+  %shr = lshr i64 %a, 32
+  %shr1 = and i64 %b, -4294967296
+  %or = or i64 %shr1, %shr
+  ret i64 %or
+}
+
+define i32 @packh_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: packh_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srli a1, a1, 16
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBP-ZBKB-LABEL: packh_i32:
+; RV32ZBP-ZBKB:       # %bb.0:
+; RV32ZBP-ZBKB-NEXT:    packh a0, a0, a1
+; RV32ZBP-ZBKB-NEXT:    ret
+  %and = and i32 %a, 255
+  %and1 = shl i32 %b, 8
+  %shl = and i32 %and1, 65280
+  %or = or i32 %shl, %and
+  ret i32 %or
+}
+
+define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: packh_i32_2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    andi a1, a1, 255
+; RV32I-NEXT:    slli a1, a1, 8
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBP-ZBKB-LABEL: packh_i32_2:
+; RV32ZBP-ZBKB:       # %bb.0:
+; RV32ZBP-ZBKB-NEXT:    packh a0, a0, a1
+; RV32ZBP-ZBKB-NEXT:    ret
+  %and = and i32 %a, 255
+  %and1 = and i32 %b, 255
+  %shl = shl i32 %and1, 8
+  %or = or i32 %shl, %and
+  ret i32 %or
+}
+
+define i64 @packh_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: packh_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    slli a1, a2, 24
+; RV32I-NEXT:    srli a1, a1, 16
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    ret
+;
+; RV32ZBP-ZBKB-LABEL: packh_i64:
+; RV32ZBP-ZBKB:       # %bb.0:
+; RV32ZBP-ZBKB-NEXT:    packh a0, a0, a2
+; RV32ZBP-ZBKB-NEXT:    li a1, 0
+; RV32ZBP-ZBKB-NEXT:    ret
+  %and = and i64 %a, 255
+  %and1 = shl i64 %b, 8
+  %shl = and i64 %and1, 65280
+  %or = or i64 %shl, %and
+  ret i64 %or
+}
+
+define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: packh_i64_2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    andi a1, a2, 255
+; RV32I-NEXT:    slli a1, a1, 8
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    ret
+;
+; RV32ZBP-ZBKB-LABEL: packh_i64_2:
+; RV32ZBP-ZBKB:       # %bb.0:
+; RV32ZBP-ZBKB-NEXT:    packh a0, a0, a2
+; RV32ZBP-ZBKB-NEXT:    li a1, 0
+; RV32ZBP-ZBKB-NEXT:    ret
+  %and = and i64 %a, 255
+  %and1 = and i64 %b, 255
+  %shl = shl i64 %and1, 8
+  %or = or i64 %shl, %and
+  ret i64 %or
+}

diff --git a/llvm/test/CodeGen/RISCV/rv32zbp.ll b/llvm/test/CodeGen/RISCV/rv32zbp.ll
index c51151f47962f..d021b26f45612 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbp.ll
@@ -2773,46 +2773,6 @@ define i64 @shfl8_i64(i64 %a, i64 %b) nounwind {
   ret i64 %or3
 }
 
-define i32 @pack_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: pack_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a0, a0, 16
-; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    slli a1, a1, 16
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: pack_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    pack a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %shl = and i32 %a, 65535
-  %shl1 = shl i32 %b, 16
-  %or = or i32 %shl1, %shl
-  ret i32 %or
-}
-
-; As we are not matching directly i64 code patterns on RV32 some i64 patterns
-; don't have yet any matching bit manipulation instructions on RV32.
-; This test is presented here in case future expansions of the Bitmanip
-; extensions introduce instructions suitable for this pattern.
-
-define i64 @pack_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: pack_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    mv a1, a2
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: pack_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    mv a1, a2
-; RV32ZBP-NEXT:    ret
-  %shl = and i64 %a, 4294967295
-  %shl1 = shl i64 %b, 32
-  %or = or i64 %shl1, %shl
-  ret i64 %or
-}
-
 define i32 @packu_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: packu_i32:
 ; RV32I:       # %bb.0:
@@ -2832,113 +2792,6 @@ define i32 @packu_i32(i32 %a, i32 %b) nounwind {
   ret i32 %or
 }
 
-; As we are not matching directly i64 code patterns on RV32 some i64 patterns
-; don't have yet any matching bit manipulation instructions on RV32.
-; This test is presented here in case future expansions of the Bitmanip
-; extensions introduce instructions suitable for this pattern.
-
-define i64 @packu_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: packu_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    mv a1, a3
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: packu_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    mv a0, a1
-; RV32ZBP-NEXT:    mv a1, a3
-; RV32ZBP-NEXT:    ret
-  %shr = lshr i64 %a, 32
-  %shr1 = and i64 %b, -4294967296
-  %or = or i64 %shr1, %shr
-  ret i64 %or
-}
-
-define i32 @packh_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: packh_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    andi a0, a0, 255
-; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    srli a1, a1, 16
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: packh_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    packh a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %and = and i32 %a, 255
-  %and1 = shl i32 %b, 8
-  %shl = and i32 %and1, 65280
-  %or = or i32 %shl, %and
-  ret i32 %or
-}
-
-define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: packh_i32_2:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    andi a0, a0, 255
-; RV32I-NEXT:    andi a1, a1, 255
-; RV32I-NEXT:    slli a1, a1, 8
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: packh_i32_2:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    packh a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %and = and i32 %a, 255
-  %and1 = and i32 %b, 255
-  %shl = shl i32 %and1, 8
-  %or = or i32 %shl, %and
-  ret i32 %or
-}
-
-define i64 @packh_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: packh_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    andi a0, a0, 255
-; RV32I-NEXT:    slli a1, a2, 24
-; RV32I-NEXT:    srli a1, a1, 16
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: packh_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    packh a0, a0, a2
-; RV32ZBP-NEXT:    li a1, 0
-; RV32ZBP-NEXT:    ret
-  %and = and i64 %a, 255
-  %and1 = shl i64 %b, 8
-  %shl = and i64 %and1, 65280
-  %or = or i64 %shl, %and
-  ret i64 %or
-}
-
-define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: packh_i64_2:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    andi a0, a0, 255
-; RV32I-NEXT:    andi a1, a2, 255
-; RV32I-NEXT:    slli a1, a1, 8
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: packh_i64_2:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    packh a0, a0, a2
-; RV32ZBP-NEXT:    li a1, 0
-; RV32ZBP-NEXT:    ret
-  %and = and i64 %a, 255
-  %and1 = and i64 %b, 255
-  %shl = shl i64 %and1, 8
-  %or = or i64 %shl, %and
-  ret i64 %or
-}
-
 define i32 @zexth_i32(i32 %a) nounwind {
 ; RV32I-LABEL: zexth_i32:
 ; RV32I:       # %bb.0:

diff --git a/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll
new file mode 100644
index 0000000000000..2f7d2a2f07d91
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zknd -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKND
+
+declare i32 @llvm.riscv.aes32dsi(i32, i32, i8);
+
+define i32 @aes32dsi(i32 %a, i32 %b) nounwind {
+; RV32ZKND-LABEL: aes32dsi
+; RV32ZKND: # %bb.0:
+; RV32ZKND-NEXT: aes32dsi a0, a0, a1, 0
+; RV32ZKND-NEXT: ret
+    %val = call i32 @llvm.riscv.aes32dsi(i32 %a, i32 %b, i8 0)
+    ret i32 %val
+}
+
+declare i32 @llvm.riscv.aes32dsmi(i32, i32, i8);
+
+define i32 @aes32dsmi(i32 %a, i32 %b) nounwind {
+; RV32ZKND-LABEL: aes32dsmi
+; RV32ZKND: # %bb.0:
+; RV32ZKND-NEXT: aes32dsmi a0, a0, a1, 1
+; RV32ZKND-NEXT: ret
+    %val = call i32 @llvm.riscv.aes32dsmi(i32 %a, i32 %b, i8 1)
+    ret i32 %val
+}
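
The trailing i8 operand is the byte-select immediate bs, which must be a compile-time constant in the range 0-3, so a middle-round AES step chains four calls, one per byte position. The sketch below is illustrative only: it feeds the same column into all four calls, ignoring the ShiftRows column selection a real implementation would apply, and the helper name is ours.

  declare i32 @llvm.riscv.aes32dsmi(i32, i32, i8)

  ; Accumulate the inverse-SubBytes/MixColumns contribution of each byte
  ; of %col into the round-key word %rk (bs must be an immediate 0..3).
  define i32 @aes32_dec_step(i32 %rk, i32 %col) nounwind {
    %t0 = call i32 @llvm.riscv.aes32dsmi(i32 %rk, i32 %col, i8 0)
    %t1 = call i32 @llvm.riscv.aes32dsmi(i32 %t0, i32 %col, i8 1)
    %t2 = call i32 @llvm.riscv.aes32dsmi(i32 %t1, i32 %col, i8 2)
    %t3 = call i32 @llvm.riscv.aes32dsmi(i32 %t2, i32 %col, i8 3)
    ret i32 %t3
  }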

diff --git a/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll
new file mode 100644
index 0000000000000..3b8937b2549da
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zkne -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKNE
+
+declare i32 @llvm.riscv.aes32esi(i32, i32, i8);
+
+define i32 @aes32esi(i32 %a, i32 %b) nounwind {
+; RV32ZKNE-LABEL: aes32esi
+; RV32ZKNE: # %bb.0:
+; RV32ZKNE-NEXT: aes32esi a0, a0, a1, 2
+; RV32ZKNE-NEXT: ret
+    %val = call i32 @llvm.riscv.aes32esi(i32 %a, i32 %b, i8 2)
+    ret i32 %val
+}
+
+declare i32 @llvm.riscv.aes32esmi(i32, i32, i8);
+
+define i32 @aes32esmi(i32 %a, i32 %b) nounwind {
+; RV32ZKNE-LABEL: aes32esmi
+; RV32ZKNE: # %bb.0:
+; RV32ZKNE-NEXT: aes32esmi a0, a0, a1, 3
+; RV32ZKNE-NEXT: ret
+    %val = call i32 @llvm.riscv.aes32esmi(i32 %a, i32 %b, i8 3)
+    ret i32 %val
+}

diff --git a/llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll
new file mode 100644
index 0000000000000..f6be9f012e85c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zknh -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKNH
+
+declare i32 @llvm.riscv.sha256sig0.i32(i32);
+
+define i32 @sha256sig0_i32(i32 %a) nounwind {
+; RV32ZKNH-LABEL: sha256sig0_i32
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha256sig0 a0, a0
+; RV32ZKNH-NEXT: ret
+    %val = call i32 @llvm.riscv.sha256sig0.i32(i32 %a)
+    ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha256sig1.i32(i32);
+
+define i32 @sha256sig1_i32(i32 %a) nounwind {
+; RV32ZKNH-LABEL: sha256sig1_i32
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha256sig1 a0, a0
+; RV32ZKNH-NEXT: ret
+    %val = call i32 @llvm.riscv.sha256sig1.i32(i32 %a)
+    ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha256sum0.i32(i32);
+
+define i32 @sha256sum0_i32(i32 %a) nounwind {
+; RV32ZKNH-LABEL: sha256sum0_i32
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha256sum0 a0, a0
+; RV32ZKNH-NEXT: ret
+    %val = call i32 @llvm.riscv.sha256sum0.i32(i32 %a)
+    ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha256sum1.i32(i32);
+
+define i32 @sha256sum1_i32(i32 %a) nounwind {
+; RV32ZKNH-LABEL: sha256sum1_i32
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha256sum1 a0, a0
+; RV32ZKNH-NEXT: ret
+    %val = call i32 @llvm.riscv.sha256sum1.i32(i32 %a)
+    ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig0l(i32, i32);
+
+define i32 @sha512sig0l(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sig0l
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sig0l a0, a0, a1
+; RV32ZKNH-NEXT: ret
+    %val = call i32 @llvm.riscv.sha512sig0l(i32 %a, i32 %b)
+    ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig0h(i32, i32);
+
+define i32 @sha512sig0h(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sig0h
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sig0h a0, a0, a1
+; RV32ZKNH-NEXT: ret
+    %val = call i32 @llvm.riscv.sha512sig0h(i32 %a, i32 %b)
+    ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig1l(i32, i32);
+
+define i32 @sha512sig1l(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sig1l
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sig1l a0, a0, a1
+; RV32ZKNH-NEXT: ret
+    %val = call i32 @llvm.riscv.sha512sig1l(i32 %a, i32 %b)
+    ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig1h(i32, i32);
+
+define i32 @sha512sig1h(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sig1h
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sig1h a0, a0, a1
+; RV32ZKNH-NEXT: ret
+    %val = call i32 @llvm.riscv.sha512sig1h(i32 %a, i32 %b)
+    ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sum0r(i32, i32);
+
+define i32 @sha512sum0r(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sum0r
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sum0r a0, a0, a1
+; RV32ZKNH-NEXT: ret
+    %val = call i32 @llvm.riscv.sha512sum0r(i32 %a, i32 %b)
+    ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sum1r(i32, i32);
+
+define i32 @sha512sum1r(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sum1r
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sum1r a0, a0, a1
+; RV32ZKNH-NEXT: ret
+    %val = call i32 @llvm.riscv.sha512sum1r(i32 %a, i32 %b)
+    ret i32 %val
+}
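
On RV32 the 64-bit SHA-512 sigma and sum functions are split into low-half and high-half instructions whose two 32-bit results concatenate into the 64-bit value. A hedged sketch of that pairing follows; the operand order (low/high for the *l form, swapped for the *h form) is our reading of the Zknh spec, and the helper name is ours.

  declare i32 @llvm.riscv.sha512sig0l(i32, i32)
  declare i32 @llvm.riscv.sha512sig0h(i32, i32)

  ; Full 64-bit sha512sig0 assembled from the RV32 half intrinsics.
  define i64 @sha512sig0_full(i64 %x) nounwind {
    %lo = trunc i64 %x to i32
    %xhi = lshr i64 %x, 32
    %hi = trunc i64 %xhi to i32
    %rlo = call i32 @llvm.riscv.sha512sig0l(i32 %lo, i32 %hi)
    %rhi = call i32 @llvm.riscv.sha512sig0h(i32 %hi, i32 %lo)
    %rlo64 = zext i32 %rlo to i64
    %rhi64 = zext i32 %rhi to i64
    %rhish = shl i64 %rhi64, 32
    %r = or i64 %rhish, %rlo64
    ret i64 %r
  }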

diff --git a/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll
new file mode 100644
index 0000000000000..e8ecb4f3decd2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zksed -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKSED
+
+declare i32 @llvm.riscv.sm4ks.i32(i32, i32, i8);
+
+define i32 @sm4ks_i32(i32 %a, i32 %b) nounwind {
+; RV32ZKSED-LABEL: sm4ks_i32:
+; RV32ZKSED:       # %bb.0:
+; RV32ZKSED-NEXT:    sm4ks a0, a0, a1, 2
+; RV32ZKSED-NEXT:    ret
+  %val = call i32 @llvm.riscv.sm4ks.i32(i32 %a, i32 %b, i8 2)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sm4ed.i32(i32, i32, i8);
+
+define i32 @sm4ed_i32(i32 %a, i32 %b) nounwind {
+; RV32ZKSED-LABEL: sm4ed_i32:
+; RV32ZKSED:       # %bb.0:
+; RV32ZKSED-NEXT:    sm4ed a0, a0, a1, 3
+; RV32ZKSED-NEXT:    ret
+  %val = call i32 @llvm.riscv.sm4ed.i32(i32 %a, i32 %b, i8 3)
+  ret i32 %val
+}
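
Unlike the AES32 instructions, an SM4 round applies all four byte positions of one combined input word: sm4ed XORs the transformed byte bs of rs2 into rs1, so four chained calls complete a round. The round structure below follows our reading of the SM4 reference flow and is not something this patch asserts; the helper name is ours.

  declare i32 @llvm.riscv.sm4ed.i32(i32, i32, i8)

  ; One SM4 encryption round: x4 = x0 ^ T(x1 ^ x2 ^ x3 ^ rk), processed
  ; one byte per call with the immediate bs stepping through 0..3.
  define i32 @sm4_round(i32 %x0, i32 %x1, i32 %x2, i32 %x3, i32 %rk) nounwind {
    %t0 = xor i32 %x1, %x2
    %t1 = xor i32 %t0, %x3
    %s = xor i32 %t1, %rk
    %r0 = call i32 @llvm.riscv.sm4ed.i32(i32 %x0, i32 %s, i8 0)
    %r1 = call i32 @llvm.riscv.sm4ed.i32(i32 %r0, i32 %s, i8 1)
    %r2 = call i32 @llvm.riscv.sm4ed.i32(i32 %r1, i32 %s, i8 2)
    %r3 = call i32 @llvm.riscv.sm4ed.i32(i32 %r2, i32 %s, i8 3)
    ret i32 %r3
  }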

diff --git a/llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll
new file mode 100644
index 0000000000000..43cdf5c8fa2cb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zksh -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKSH
+
+declare i32 @llvm.riscv.sm3p0.i32(i32);
+
+define i32 @sm3p0_i32(i32 %a) nounwind {
+; RV32ZKSH-LABEL: sm3p0_i32:
+; RV32ZKSH:       # %bb.0:
+; RV32ZKSH-NEXT:    sm3p0 a0, a0
+; RV32ZKSH-NEXT:    ret
+  %val = call i32 @llvm.riscv.sm3p0.i32(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sm3p1.i32(i32);
+
+define i32 @sm3p1_i32(i32 %a) nounwind {
+; RV32ZKSH-LABEL: sm3p1_i32:
+; RV32ZKSH:       # %bb.0:
+; RV32ZKSH-NEXT:    sm3p1 a0, a0
+; RV32ZKSH-NEXT:    ret
+  %val = call i32 @llvm.riscv.sm3p1.i32(i32 %a)
+  ret i32 %val
+}
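
For reference, the SM3 permutations computed by these instructions are P0(x) = x ^ rol(x, 9) ^ rol(x, 17) and P1(x) = x ^ rol(x, 15) ^ rol(x, 23). A generic-IR equivalent of P0 is sketched below for comparison only; this patch exposes the instruction through the intrinsic, and we make no claim that the generic form is pattern-matched to sm3p0.

  declare i32 @llvm.fshl.i32(i32, i32, i32)

  ; Generic-IR form of SM3 P0: x ^ rol(x, 9) ^ rol(x, 17).
  define i32 @sm3_p0_generic(i32 %x) nounwind {
    %r9 = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 9)
    %r17 = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 17)
    %t = xor i32 %x, %r9
    %p0 = xor i32 %t, %r17
    ret i32 %p0
  }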

diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbp-zbkb.ll
similarity index 56%
rename from llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
rename to llvm/test/CodeGen/RISCV/rv64zbb-zbp-zbkb.ll
index d166cbdad8875..d519b14068868 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbp-zbkb.ll
@@ -1,10 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV64I
+; RUN:   | FileCheck %s -check-prefixes=RV64I
 ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV64ZBB
+; RUN:   | FileCheck %s -check-prefixes=RV64ZBB-ZBP-ZBKB
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbp -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV64ZBP
+; RUN:   | FileCheck %s -check-prefixes=RV64ZBB-ZBP-ZBKB
+; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV64ZBB-ZBP-ZBKB
 
 define signext i32 @andn_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: andn_i32:
@@ -13,15 +15,10 @@ define signext i32 @andn_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: andn_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    andn a0, a0, a1
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: andn_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    andn a0, a0, a1
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: andn_i32:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    andn a0, a0, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %neg = xor i32 %b, -1
   %and = and i32 %neg, %a
   ret i32 %and
@@ -34,15 +31,10 @@ define i64 @andn_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: andn_i64:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    andn a0, a0, a1
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: andn_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    andn a0, a0, a1
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: andn_i64:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    andn a0, a0, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %neg = xor i64 %b, -1
   %and = and i64 %neg, %a
   ret i64 %and
@@ -55,15 +47,10 @@ define signext i32 @orn_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: orn_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    orn a0, a0, a1
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: orn_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orn a0, a0, a1
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: orn_i32:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    orn a0, a0, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %neg = xor i32 %b, -1
   %or = or i32 %neg, %a
   ret i32 %or
@@ -76,15 +63,10 @@ define i64 @orn_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: orn_i64:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    orn a0, a0, a1
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: orn_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orn a0, a0, a1
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: orn_i64:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    orn a0, a0, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %neg = xor i64 %b, -1
   %or = or i64 %neg, %a
   ret i64 %or
@@ -97,15 +79,10 @@ define signext i32 @xnor_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: xnor_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    xnor a0, a0, a1
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: xnor_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    xnor a0, a0, a1
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: xnor_i32:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    xnor a0, a0, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %neg = xor i32 %a, -1
   %xor = xor i32 %neg, %b
   ret i32 %xor
@@ -118,15 +95,10 @@ define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: xnor_i64:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    xnor a0, a0, a1
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: xnor_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    xnor a0, a0, a1
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: xnor_i64:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    xnor a0, a0, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %neg = xor i64 %a, -1
   %xor = xor i64 %neg, %b
   ret i64 %xor
@@ -143,15 +115,10 @@ define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: rol_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    rolw a0, a0, a1
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: rol_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rolw a0, a0, a1
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: rol_i32:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    rolw a0, a0, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
   ret i32 %1
 }
@@ -167,17 +134,11 @@ define void @rol_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
 ; RV64I-NEXT:    sw a0, 0(a2)
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: rol_i32_nosext:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    rolw a0, a0, a1
-; RV64ZBB-NEXT:    sw a0, 0(a2)
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: rol_i32_nosext:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rolw a0, a0, a1
-; RV64ZBP-NEXT:    sw a0, 0(a2)
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: rol_i32_nosext:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    rolw a0, a0, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    sw a0, 0(a2)
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
   store i32 %1, i32* %x
   ret void
@@ -193,17 +154,11 @@ define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind {
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: rol_i32_neg_constant_rhs:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    li a1, -2
-; RV64ZBB-NEXT:    rolw a0, a1, a0
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: rol_i32_neg_constant_rhs:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    li a1, -2
-; RV64ZBP-NEXT:    rolw a0, a1, a0
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: rol_i32_neg_constant_rhs:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    li a1, -2
+; RV64ZBB-ZBP-ZBKB-NEXT:    rolw a0, a1, a0
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 -2, i32 -2, i32 %a)
   ret i32 %1
 }
@@ -219,15 +174,10 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: rol_i64:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    rol a0, a0, a1
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: rol_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rol a0, a0, a1
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: rol_i64:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    rol a0, a0, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %or = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %b)
   ret i64 %or
 }
@@ -243,15 +193,10 @@ define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: ror_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    rorw a0, a0, a1
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: ror_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rorw a0, a0, a1
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: ror_i32:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    rorw a0, a0, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
   ret i32 %1
 }
@@ -267,17 +212,11 @@ define void @ror_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
 ; RV64I-NEXT:    sw a0, 0(a2)
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: ror_i32_nosext:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    rorw a0, a0, a1
-; RV64ZBB-NEXT:    sw a0, 0(a2)
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: ror_i32_nosext:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rorw a0, a0, a1
-; RV64ZBP-NEXT:    sw a0, 0(a2)
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: ror_i32_nosext:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    rorw a0, a0, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    sw a0, 0(a2)
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
   store i32 %1, i32* %x
   ret void
@@ -293,17 +232,11 @@ define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind {
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: ror_i32_neg_constant_rhs:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    li a1, -2
-; RV64ZBB-NEXT:    rorw a0, a1, a0
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: ror_i32_neg_constant_rhs:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    li a1, -2
-; RV64ZBP-NEXT:    rorw a0, a1, a0
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: ror_i32_neg_constant_rhs:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    li a1, -2
+; RV64ZBB-ZBP-ZBKB-NEXT:    rorw a0, a1, a0
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 -2, i32 -2, i32 %a)
   ret i32 %1
 }
@@ -319,15 +252,10 @@ define i64 @ror_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: ror_i64:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    ror a0, a0, a1
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: ror_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    ror a0, a0, a1
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: ror_i64:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    ror a0, a0, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %or = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %b)
   ret i64 %or
 }
@@ -340,15 +268,10 @@ define signext i32 @rori_i32_fshl(i32 signext %a) nounwind {
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: rori_i32_fshl:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    roriw a0, a0, 1
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: rori_i32_fshl:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    roriw a0, a0, 1
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: rori_i32_fshl:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    roriw a0, a0, 1
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31)
   ret i32 %1
 }
@@ -363,17 +286,11 @@ define void @rori_i32_fshl_nosext(i32 signext %a, i32* %x) nounwind {
 ; RV64I-NEXT:    sw a0, 0(a1)
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: rori_i32_fshl_nosext:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    roriw a0, a0, 1
-; RV64ZBB-NEXT:    sw a0, 0(a1)
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: rori_i32_fshl_nosext:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    roriw a0, a0, 1
-; RV64ZBP-NEXT:    sw a0, 0(a1)
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: rori_i32_fshl_nosext:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    roriw a0, a0, 1
+; RV64ZBB-ZBP-ZBKB-NEXT:    sw a0, 0(a1)
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31)
   store i32 %1, i32* %x
   ret void
@@ -387,15 +304,10 @@ define signext i32 @rori_i32_fshr(i32 signext %a) nounwind {
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: rori_i32_fshr:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    roriw a0, a0, 31
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: rori_i32_fshr:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    roriw a0, a0, 31
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: rori_i32_fshr:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    roriw a0, a0, 31
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31)
   ret i32 %1
 }
@@ -410,17 +322,11 @@ define void @rori_i32_fshr_nosext(i32 signext %a, i32* %x) nounwind {
 ; RV64I-NEXT:    sw a0, 0(a1)
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: rori_i32_fshr_nosext:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    roriw a0, a0, 31
-; RV64ZBB-NEXT:    sw a0, 0(a1)
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: rori_i32_fshr_nosext:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    roriw a0, a0, 31
-; RV64ZBP-NEXT:    sw a0, 0(a1)
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: rori_i32_fshr_nosext:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    roriw a0, a0, 31
+; RV64ZBB-ZBP-ZBKB-NEXT:    sw a0, 0(a1)
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31)
   store i32 %1, i32* %x
   ret void
@@ -437,19 +343,12 @@ define signext i32 @not_rori_i32(i32 signext %x, i32 signext %y) nounwind {
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: not_rori_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    slliw a0, a0, 31
-; RV64ZBB-NEXT:    srliw a1, a1, 1
-; RV64ZBB-NEXT:    or a0, a0, a1
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: not_rori_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    slliw a0, a0, 31
-; RV64ZBP-NEXT:    srliw a1, a1, 1
-; RV64ZBP-NEXT:    or a0, a0, a1
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: not_rori_i32:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    slliw a0, a0, 31
+; RV64ZBB-ZBP-ZBKB-NEXT:    srliw a1, a1, 1
+; RV64ZBB-ZBP-ZBKB-NEXT:    or a0, a0, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %a = shl i32 %x, 31
   %b = lshr i32 %y, 1
   %c = or i32 %a, %b
@@ -470,25 +369,15 @@ define i64 @roriw_bug(i64 %x) nounwind {
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: roriw_bug:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    slli a1, a0, 31
-; RV64ZBB-NEXT:    andi a0, a0, -2
-; RV64ZBB-NEXT:    srli a2, a0, 1
-; RV64ZBB-NEXT:    or a1, a1, a2
-; RV64ZBB-NEXT:    sext.w a1, a1
-; RV64ZBB-NEXT:    xor a0, a0, a1
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: roriw_bug:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    slli a1, a0, 31
-; RV64ZBP-NEXT:    andi a0, a0, -2
-; RV64ZBP-NEXT:    srli a2, a0, 1
-; RV64ZBP-NEXT:    or a1, a1, a2
-; RV64ZBP-NEXT:    sext.w a1, a1
-; RV64ZBP-NEXT:    xor a0, a0, a1
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: roriw_bug:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    slli a1, a0, 31
+; RV64ZBB-ZBP-ZBKB-NEXT:    andi a0, a0, -2
+; RV64ZBB-ZBP-ZBKB-NEXT:    srli a2, a0, 1
+; RV64ZBB-ZBP-ZBKB-NEXT:    or a1, a1, a2
+; RV64ZBB-ZBP-ZBKB-NEXT:    sext.w a1, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    xor a0, a0, a1
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %a = shl i64 %x, 31
   %b = and i64 %x, 18446744073709551614
   %c = lshr i64 %b, 1
@@ -507,15 +396,10 @@ define i64 @rori_i64_fshl(i64 %a) nounwind {
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: rori_i64_fshl:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    rori a0, a0, 1
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: rori_i64_fshl:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rori a0, a0, 1
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: rori_i64_fshl:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    rori a0, a0, 1
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 63)
   ret i64 %1
 }
@@ -528,15 +412,10 @@ define i64 @rori_i64_fshr(i64 %a) nounwind {
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: rori_i64_fshr:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    rori a0, a0, 63
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: rori_i64_fshr:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rori a0, a0, 63
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: rori_i64_fshr:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    rori a0, a0, 63
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 63)
   ret i64 %1
 }
@@ -548,17 +427,11 @@ define i8 @srli_i8(i8 %a) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 62
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: srli_i8:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    slli a0, a0, 56
-; RV64ZBB-NEXT:    srli a0, a0, 62
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: srli_i8:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    slli a0, a0, 56
-; RV64ZBP-NEXT:    srli a0, a0, 62
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: srli_i8:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    slli a0, a0, 56
+; RV64ZBB-ZBP-ZBKB-NEXT:    srli a0, a0, 62
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = lshr i8 %a, 6
   ret i8 %1
 }
@@ -572,17 +445,11 @@ define i8 @srai_i8(i8 %a) nounwind {
 ; RV64I-NEXT:    srai a0, a0, 61
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: srai_i8:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    slli a0, a0, 56
-; RV64ZBB-NEXT:    srai a0, a0, 61
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: srai_i8:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    slli a0, a0, 56
-; RV64ZBP-NEXT:    srai a0, a0, 61
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: srai_i8:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    slli a0, a0, 56
+; RV64ZBB-ZBP-ZBKB-NEXT:    srai a0, a0, 61
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = ashr i8 %a, 5
   ret i8 %1
 }
@@ -596,17 +463,11 @@ define i16 @srli_i16(i16 %a) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 54
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: srli_i16:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    slli a0, a0, 48
-; RV64ZBB-NEXT:    srli a0, a0, 54
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: srli_i16:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    slli a0, a0, 48
-; RV64ZBP-NEXT:    srli a0, a0, 54
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: srli_i16:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    slli a0, a0, 48
+; RV64ZBB-ZBP-ZBKB-NEXT:    srli a0, a0, 54
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = lshr i16 %a, 6
   ret i16 %1
 }
@@ -620,17 +481,11 @@ define i16 @srai_i16(i16 %a) nounwind {
 ; RV64I-NEXT:    srai a0, a0, 57
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: srai_i16:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    slli a0, a0, 48
-; RV64ZBB-NEXT:    srai a0, a0, 57
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: srai_i16:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    slli a0, a0, 48
-; RV64ZBP-NEXT:    srai a0, a0, 57
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: srai_i16:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    slli a0, a0, 48
+; RV64ZBB-ZBP-ZBKB-NEXT:    srai a0, a0, 57
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %1 = ashr i16 %a, 9
   ret i16 %1
 }
@@ -643,17 +498,11 @@ define i1 @andn_seqz_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: andn_seqz_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    andn a0, a1, a0
-; RV64ZBB-NEXT:    seqz a0, a0
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: andn_seqz_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    andn a0, a1, a0
-; RV64ZBP-NEXT:    seqz a0, a0
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: andn_seqz_i32:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    andn a0, a1, a0
+; RV64ZBB-ZBP-ZBKB-NEXT:    seqz a0, a0
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %and = and i32 %a, %b
   %cmpeq = icmp eq i32 %and, %b
   ret i1 %cmpeq
@@ -667,17 +516,11 @@ define i1 @andn_seqz_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: andn_seqz_i64:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    andn a0, a1, a0
-; RV64ZBB-NEXT:    seqz a0, a0
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: andn_seqz_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    andn a0, a1, a0
-; RV64ZBP-NEXT:    seqz a0, a0
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: andn_seqz_i64:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    andn a0, a1, a0
+; RV64ZBB-ZBP-ZBKB-NEXT:    seqz a0, a0
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %and = and i64 %a, %b
   %cmpeq = icmp eq i64 %and, %b
   ret i1 %cmpeq
@@ -691,17 +534,11 @@ define i1 @andn_snez_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: andn_snez_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    andn a0, a1, a0
-; RV64ZBB-NEXT:    snez a0, a0
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: andn_snez_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    andn a0, a1, a0
-; RV64ZBP-NEXT:    snez a0, a0
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: andn_snez_i32:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    andn a0, a1, a0
+; RV64ZBB-ZBP-ZBKB-NEXT:    snez a0, a0
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %and = and i32 %a, %b
   %cmpeq = icmp ne i32 %and, %b
   ret i1 %cmpeq
@@ -715,17 +552,11 @@ define i1 @andn_snez_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-LABEL: andn_snez_i64:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    andn a0, a1, a0
-; RV64ZBB-NEXT:    snez a0, a0
-; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: andn_snez_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    andn a0, a1, a0
-; RV64ZBP-NEXT:    snez a0, a0
-; RV64ZBP-NEXT:    ret
+; RV64ZBB-ZBP-ZBKB-LABEL: andn_snez_i64:
+; RV64ZBB-ZBP-ZBKB:       # %bb.0:
+; RV64ZBB-ZBP-ZBKB-NEXT:    andn a0, a1, a0
+; RV64ZBB-ZBP-ZBKB-NEXT:    snez a0, a0
+; RV64ZBB-ZBP-ZBKB-NEXT:    ret
   %and = and i64 %a, %b
   %cmpeq = icmp ne i64 %and, %b
   ret i1 %cmpeq

diff --git a/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll
index b99a84e445a6c..87afe13bfdf0c 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll
@@ -2,28 +2,6 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+zbc -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64ZBC
 
-declare i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
-
-define i64 @clmul64(i64 %a, i64 %b) nounwind {
-; RV64ZBC-LABEL: clmul64:
-; RV64ZBC:       # %bb.0:
-; RV64ZBC-NEXT:    clmul a0, a0, a1
-; RV64ZBC-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
- ret i64 %tmp
-}
-
-declare i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b)
-
-define i64 @clmul64h(i64 %a, i64 %b) nounwind {
-; RV64ZBC-LABEL: clmul64h:
-; RV64ZBC:       # %bb.0:
-; RV64ZBC-NEXT:    clmulh a0, a0, a1
-; RV64ZBC-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b)
- ret i64 %tmp
-}
-
 declare i64 @llvm.riscv.clmulr.i64(i64 %a, i64 %b)
 
 define i64 @clmul64r(i64 %a, i64 %b) nounwind {

diff --git a/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll
new file mode 100644
index 0000000000000..180fef9b477e2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zbc -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBC-ZBKC
+; RUN: llc -mtriple=riscv64 -mattr=+zbkc -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBC-ZBKC
+
+declare i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
+
+define i64 @clmul64(i64 %a, i64 %b) nounwind {
+; RV64ZBC-ZBKC-LABEL: clmul64:
+; RV64ZBC-ZBKC:       # %bb.0:
+; RV64ZBC-ZBKC-NEXT:    clmul a0, a0, a1
+; RV64ZBC-ZBKC-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b)
+
+define i64 @clmul64h(i64 %a, i64 %b) nounwind {
+; RV64ZBC-ZBKC-LABEL: clmul64h:
+; RV64ZBC-ZBKC:       # %bb.0:
+; RV64ZBC-ZBKC-NEXT:    clmulh a0, a0, a1
+; RV64ZBC-ZBKC-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+

diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll
new file mode 100644
index 0000000000000..ec3196beacd0e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBKB
+
+declare i64 @llvm.riscv.brev8(i64)
+
+define i64 @brev8(i64 %a) nounwind {
+; RV64ZBKB-LABEL: brev8:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    ret
+  %val = call i64 @llvm.riscv.brev8(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.bswap.i64(i64)
+
+define i64 @rev8_i64(i64 %a) nounwind {
+; RV64ZBKB-LABEL: rev8_i64:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    ret
+  %1 = call i64 @llvm.bswap.i64(i64 %a)
+  ret i64 %1
+}
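
brev8 reverses the bit order within each byte, while rev8 (matched above from llvm.bswap) reverses the byte order; composing the two yields a full 64-bit bit reversal. A minimal sketch using only the intrinsics already declared in this test; the helper name is ours.

  declare i64 @llvm.riscv.brev8(i64)
  declare i64 @llvm.bswap.i64(i64)

  ; Full bit reversal = per-byte bit reversal followed by a byte swap.
  define i64 @bitreverse_via_zbkb(i64 %a) nounwind {
    %b = call i64 @llvm.riscv.brev8(i64 %a)
    %r = call i64 @llvm.bswap.i64(i64 %b)
    ret i64 %r
  }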

diff --git a/llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll
new file mode 100644
index 0000000000000..19ccdfe5303d8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=riscv64 -mattr=+zbkx -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBKX
+
+declare i64 @llvm.riscv.xperm8.i64(i64 %a, i64 %b)
+
+define i64 @xperm8(i64 %a, i64 %b) nounwind {
+; RV64ZBKX-LABEL: xperm8:
+; RV64ZBKX:       # %bb.0:
+; RV64ZBKX-NEXT:    xperm8 a0, a0, a1
+; RV64ZBKX-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.xperm8.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.xperm4.i64(i64 %a, i64 %b)
+
+define i64 @xperm4(i64 %a, i64 %b) nounwind {
+; RV64ZBKX-LABEL: xperm4:
+; RV64ZBKX:       # %bb.0:
+; RV64ZBKX-NEXT:    xperm4 a0, a0, a1
+; RV64ZBKX-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.xperm4.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}

diff --git a/llvm/test/CodeGen/RISCV/rv64zbp-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbp-zbkb.ll
new file mode 100644
index 0000000000000..a2c27e028fca9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zbp-zbkb.ll
@@ -0,0 +1,125 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbp -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBP-ZBKB
+; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBP-ZBKB
+
+define signext i32 @pack_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: pack_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    slliw a1, a1, 16
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBP-ZBKB-LABEL: pack_i32:
+; RV64ZBP-ZBKB:       # %bb.0:
+; RV64ZBP-ZBKB-NEXT:    packw a0, a0, a1
+; RV64ZBP-ZBKB-NEXT:    ret
+  %shl = and i32 %a, 65535
+  %shl1 = shl i32 %b, 16
+  %or = or i32 %shl1, %shl
+  ret i32 %or
+}
+
+define i64 @pack_i64(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: pack_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBP-ZBKB-LABEL: pack_i64:
+; RV64ZBP-ZBKB:       # %bb.0:
+; RV64ZBP-ZBKB-NEXT:    pack a0, a0, a1
+; RV64ZBP-ZBKB-NEXT:    ret
+  %shl = and i64 %a, 4294967295
+  %shl1 = shl i64 %b, 32
+  %or = or i64 %shl1, %shl
+  ret i64 %or
+}
+
+define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: packh_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srli a1, a1, 48
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBP-ZBKB-LABEL: packh_i32:
+; RV64ZBP-ZBKB:       # %bb.0:
+; RV64ZBP-ZBKB-NEXT:    packh a0, a0, a1
+; RV64ZBP-ZBKB-NEXT:    ret
+  %and = and i32 %a, 255
+  %and1 = shl i32 %b, 8
+  %shl = and i32 %and1, 65280
+  %or = or i32 %shl, %and
+  ret i32 %or
+}
+
+define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: packh_i32_2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    andi a1, a1, 255
+; RV64I-NEXT:    slli a1, a1, 8
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBP-ZBKB-LABEL: packh_i32_2:
+; RV64ZBP-ZBKB:       # %bb.0:
+; RV64ZBP-ZBKB-NEXT:    packh a0, a0, a1
+; RV64ZBP-ZBKB-NEXT:    ret
+  %and = and i32 %a, 255
+  %and1 = and i32 %b, 255
+  %shl = shl i32 %and1, 8
+  %or = or i32 %shl, %and
+  ret i32 %or
+}
+
+define i64 @packh_i64(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: packh_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srli a1, a1, 48
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBP-ZBKB-LABEL: packh_i64:
+; RV64ZBP-ZBKB:       # %bb.0:
+; RV64ZBP-ZBKB-NEXT:    packh a0, a0, a1
+; RV64ZBP-ZBKB-NEXT:    ret
+  %and = and i64 %a, 255
+  %and1 = shl i64 %b, 8
+  %shl = and i64 %and1, 65280
+  %or = or i64 %shl, %and
+  ret i64 %or
+}
+
+define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: packh_i64_2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    andi a1, a1, 255
+; RV64I-NEXT:    slli a1, a1, 8
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBP-ZBKB-LABEL: packh_i64_2:
+; RV64ZBP-ZBKB:       # %bb.0:
+; RV64ZBP-ZBKB-NEXT:    packh a0, a0, a1
+; RV64ZBP-ZBKB-NEXT:    ret
+  %and = and i64 %a, 255
+  %and1 = and i64 %b, 255
+  %shl = shl i64 %and1, 8
+  %or = or i64 %shl, %and
+  ret i64 %or
+}

diff --git a/llvm/test/CodeGen/RISCV/rv64zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbp.ll
index b68a98b13052b..6a4376409fab5 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbp.ll
@@ -2713,44 +2713,6 @@ define i64 @shfl16(i64 %a, i64 %b) nounwind {
   ret i64 %or3
 }
 
-define signext i32 @pack_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: pack_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 48
-; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    slliw a1, a1, 16
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: pack_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    packw a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %shl = and i32 %a, 65535
-  %shl1 = shl i32 %b, 16
-  %or = or i32 %shl1, %shl
-  ret i32 %or
-}
-
-define i64 @pack_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: pack_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: pack_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    pack a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %shl = and i64 %a, 4294967295
-  %shl1 = shl i64 %b, 32
-  %or = or i64 %shl1, %shl
-  ret i64 %or
-}
-
 define signext i32 @packu_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: packu_i32:
 ; RV64I:       # %bb.0:
@@ -2790,86 +2752,6 @@ define i64 @packu_i64(i64 %a, i64 %b) nounwind {
   ret i64 %or
 }
 
-define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: packh_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    andi a0, a0, 255
-; RV64I-NEXT:    slli a1, a1, 56
-; RV64I-NEXT:    srli a1, a1, 48
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: packh_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    packh a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %and = and i32 %a, 255
-  %and1 = shl i32 %b, 8
-  %shl = and i32 %and1, 65280
-  %or = or i32 %shl, %and
-  ret i32 %or
-}
-
-define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
-; RV64I-LABEL: packh_i32_2:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    andi a0, a0, 255
-; RV64I-NEXT:    andi a1, a1, 255
-; RV64I-NEXT:    slli a1, a1, 8
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: packh_i32_2:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    packh a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %and = and i32 %a, 255
-  %and1 = and i32 %b, 255
-  %shl = shl i32 %and1, 8
-  %or = or i32 %shl, %and
-  ret i32 %or
-}
-
-define i64 @packh_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: packh_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    andi a0, a0, 255
-; RV64I-NEXT:    slli a1, a1, 56
-; RV64I-NEXT:    srli a1, a1, 48
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: packh_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    packh a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %and = and i64 %a, 255
-  %and1 = shl i64 %b, 8
-  %shl = and i64 %and1, 65280
-  %or = or i64 %shl, %and
-  ret i64 %or
-}
-
-define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: packh_i64_2:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    andi a0, a0, 255
-; RV64I-NEXT:    andi a1, a1, 255
-; RV64I-NEXT:    slli a1, a1, 8
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: packh_i64_2:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    packh a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %and = and i64 %a, 255
-  %and1 = and i64 %b, 255
-  %shl = shl i64 %and1, 8
-  %or = or i64 %shl, %and
-  ret i64 %or
-}
-
 define i32 @zexth_i32(i32 %a) nounwind {
 ; RV64I-LABEL: zexth_i32:
 ; RV64I:       # %bb.0:

diff --git a/llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll
new file mode 100644
index 0000000000000..230ae7134e457
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll
@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zknd -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKND
+
+declare i64 @llvm.riscv.aes64ds(i64, i64);
+
+define i64 @aes64ds(i64 %a, i64 %b) nounwind {
+; RV64ZKND-LABEL: aes64ds
+; RV64ZKND: # %bb.0:
+; RV64ZKND-NEXT: aes64ds a0, a0, a1
+; RV64ZKND-NEXT: ret
+    %val = call i64 @llvm.riscv.aes64ds(i64 %a, i64 %b)
+    ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64dsm(i64, i64);
+
+define i64 @aes64dsm(i64 %a, i64 %b) nounwind {
+; RV64ZKND-LABEL: aes64dsm
+; RV64ZKND: # %bb.0:
+; RV64ZKND-NEXT: aes64dsm a0, a0, a1
+; RV64ZKND-NEXT: ret
+    %val = call i64 @llvm.riscv.aes64dsm(i64 %a, i64 %b)
+    ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64im(i64);
+
+define i64 @aes64im(i64 %a) nounwind {
+; RV64ZKND-LABEL: aes64im
+; RV64ZKND: # %bb.0:
+; RV64ZKND-NEXT: aes64im a0, a0
+; RV64ZKND-NEXT: ret
+    %val = call i64 @llvm.riscv.aes64im(i64 %a)
+    ret i64 %val
+}

diff --git a/llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll
new file mode 100644
index 0000000000000..23559b97410ee
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zknd -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKND-ZKNE
+; RUN: llc -mtriple=riscv64 -mattr=+zkne -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKND-ZKNE
+
+declare i64 @llvm.riscv.aes64ks2(i64, i64);
+
+define i64 @aes64ks2(i64 %a, i64 %b) nounwind {
+; RV64ZKND-ZKNE-LABEL: aes64ks2
+; RV64ZKND-ZKNE: # %bb.0:
+; RV64ZKND-ZKNE-NEXT: aes64ks2 a0, a0, a1
+; RV64ZKND-ZKNE-NEXT: ret
+    %val = call i64 @llvm.riscv.aes64ks2(i64 %a, i64 %b)
+    ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64ks1i(i64, i32);
+
+define i64 @aes64ks1i(i64 %a) nounwind {
+; RV64ZKND-ZKNE-LABEL: aes64ks1i
+; RV64ZKND-ZKNE: # %bb.0:
+; RV64ZKND-ZKNE-NEXT: aes64ks1i a0, a0, 10
+; RV64ZKND-ZKNE-NEXT: ret
+    %val = call i64 @llvm.riscv.aes64ks1i(i64 %a, i32 10)
+    ret i64 %val
+}
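
aes64ks1i takes the round number as an i32 immediate in the range 0-10 (with 10 encoding the no-round-constant case used in the AES-256 schedule), and its result feeds aes64ks2. The sketch below shows one plausible shape of an AES-128 key-schedule step under our reading of the spec; the exact chaining across round-key words should be taken from the spec's reference flows, and the helper name is ours.

  declare i64 @llvm.riscv.aes64ks1i(i64, i32)
  declare i64 @llvm.riscv.aes64ks2(i64, i64)

  ; Derive the next 64 bits of AES-128 round key from the previous
  ; 128 bits (%k0 = low half, %k1 = high half); the immediate selects
  ; the round constant for round 0.
  define i64 @aes128_ks_step(i64 %k0, i64 %k1) nounwind {
    %t = call i64 @llvm.riscv.aes64ks1i(i64 %k1, i32 0)
    %n = call i64 @llvm.riscv.aes64ks2(i64 %t, i64 %k0)
    ret i64 %n
  }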

diff --git a/llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll
new file mode 100644
index 0000000000000..1697769d2b9d4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zkne -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKNE
+
+declare i64 @llvm.riscv.aes64es(i64, i64);
+
+define i64 @aes64es(i64 %a, i64 %b) nounwind {
+; RV64ZKNE-LABEL: aes64es
+; RV64ZKNE: # %bb.0:
+; RV64ZKNE-NEXT: aes64es a0, a0, a1
+; RV64ZKNE-NEXT: ret
+    %val = call i64 @llvm.riscv.aes64es(i64 %a, i64 %b)
+    ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64esm(i64, i64);
+
+define i64 @aes64esm(i64 %a, i64 %b) nounwind {
+; RV64ZKNE-LABEL: aes64esm
+; RV64ZKNE: # %bb.0:
+; RV64ZKNE-NEXT: aes64esm a0, a0, a1
+; RV64ZKNE-NEXT: ret
+    %val = call i64 @llvm.riscv.aes64esm(i64 %a, i64 %b)
+    ret i64 %val
+}

diff --git a/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll
new file mode 100644
index 0000000000000..b77fe1c4cf7d2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zknh -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKNH
+
+
+declare i64 @llvm.riscv.sha256sig0.i64(i64)
+
+define i64 @sha256sig0_i64(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha256sig0_i64:
+; RV64ZKNH:       # %bb.0:
+; RV64ZKNH-NEXT:    sha256sig0 a0, a0
+; RV64ZKNH-NEXT:    ret
+  %val = call i64 @llvm.riscv.sha256sig0.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha256sig1.i64(i64)
+
+define i64 @sha256sig1_i64(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha256sig1_i64:
+; RV64ZKNH:       # %bb.0:
+; RV64ZKNH-NEXT:    sha256sig1 a0, a0
+; RV64ZKNH-NEXT:    ret
+  %val = call i64 @llvm.riscv.sha256sig1.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha256sum0.i64(i64)
+
+define i64 @sha256sum0_i64(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha256sum0_i64:
+; RV64ZKNH:       # %bb.0:
+; RV64ZKNH-NEXT:    sha256sum0 a0, a0
+; RV64ZKNH-NEXT:    ret
+  %val = call i64 @llvm.riscv.sha256sum0.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha256sum1.i64(i64)
+
+define i64 @sha256sum1_i64(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha256sum1_i64:
+; RV64ZKNH:       # %bb.0:
+; RV64ZKNH-NEXT:    sha256sum1 a0, a0
+; RV64ZKNH-NEXT:    ret
+  %val = call i64 @llvm.riscv.sha256sum1.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sig0(i64)
+
+define i64 @sha512sig0(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha512sig0:
+; RV64ZKNH:       # %bb.0:
+; RV64ZKNH-NEXT:    sha512sig0 a0, a0
+; RV64ZKNH-NEXT:    ret
+  %val = call i64 @llvm.riscv.sha512sig0(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sig1(i64)
+
+define i64 @sha512sig1(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha512sig1:
+; RV64ZKNH:       # %bb.0:
+; RV64ZKNH-NEXT:    sha512sig1 a0, a0
+; RV64ZKNH-NEXT:    ret
+  %val = call i64 @llvm.riscv.sha512sig1(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sum0(i64)
+
+define i64 @sha512sum0(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha512sum0:
+; RV64ZKNH:       # %bb.0:
+; RV64ZKNH-NEXT:    sha512sum0 a0, a0
+; RV64ZKNH-NEXT:    ret
+  %val = call i64 @llvm.riscv.sha512sum0(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sum1(i64)
+
+define i64 @sha512sum1(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha512sum1:
+; RV64ZKNH:       # %bb.0:
+; RV64ZKNH-NEXT:    sha512sum1 a0, a0
+; RV64ZKNH-NEXT:    ret
+  %val = call i64 @llvm.riscv.sha512sum1(i64 %a)
+  ret i64 %val
+}
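The SHA-2 intrinsics map one-to-one onto the sigma and sum functions of FIPS 180-4; on RV64 the SHA-256 variants take i64 and operate on the 32-bit word held (sign-extended) in the low half. A sketch of one SHA-256 message-schedule step, W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16], assuming the schedule words are kept in i64 registers as the .i64 overloads above expect; the function name is illustrative:

  declare i64 @llvm.riscv.sha256sig0.i64(i64)
  declare i64 @llvm.riscv.sha256sig1.i64(i64)

  ; One SHA-256 message-schedule step. SHA-256 addition is modulo 2^32,
  ; so only the low 32 bits of the result are meaningful.
  define i64 @sha256_msg_sched(i64 %w2, i64 %w7, i64 %w15, i64 %w16) nounwind {
    %s1 = call i64 @llvm.riscv.sha256sig1.i64(i64 %w2)
    %s0 = call i64 @llvm.riscv.sha256sig0.i64(i64 %w15)
    %a  = add i64 %s1, %w7
    %b  = add i64 %a, %s0
    %c  = add i64 %b, %w16
    ret i64 %c
  }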

diff  --git a/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll
new file mode 100644
index 0000000000000..2fa7601906067
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zksed -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKSED
+
+declare i64 @llvm.riscv.sm4ks.i64(i64, i64, i8)
+
+define i64 @sm4ks_i64(i64 %a, i64 %b) nounwind {
+; RV64ZKSED-LABEL: sm4ks_i64:
+; RV64ZKSED:       # %bb.0:
+; RV64ZKSED-NEXT:    sm4ks a0, a0, a1, 0
+; RV64ZKSED-NEXT:    ret
+  %val = call i64 @llvm.riscv.sm4ks.i64(i64 %a, i64 %b, i8 0)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sm4ed.i64(i64, i64, i8)
+
+define i64 @sm4ed_i64(i64 %a, i64 %b) nounwind {
+; RV64ZKSED-LABEL: sm4ed_i64:
+; RV64ZKSED:       # %bb.0:
+; RV64ZKSED-NEXT:    sm4ed a0, a0, a1, 1
+; RV64ZKSED-NEXT:    ret
+  %val = call i64 @llvm.riscv.sm4ed.i64(i64 %a, i64 %b, i8 1)
+  ret i64 %val
+}
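Both SM4 intrinsics take an immediate byte select (bs) as their third operand, which is why the tests must pass constant i8 values. Applying the full round function means chaining four calls with bs = 0 through 3 against the same second operand. A sketch for the encryption direction, where %t would hold X1 ^ X2 ^ X3 ^ rk; names are illustrative, and sm4ks is used the same way in the key schedule:

  declare i64 @llvm.riscv.sm4ed.i64(i64, i64, i8)

  ; SM4 round function T: each call substitutes and diffuses one byte of
  ; %t (selected by the immediate) and XOR-accumulates into the state.
  define i64 @sm4_round(i64 %x0, i64 %t) nounwind {
    %a = call i64 @llvm.riscv.sm4ed.i64(i64 %x0, i64 %t, i8 0)
    %b = call i64 @llvm.riscv.sm4ed.i64(i64 %a, i64 %t, i8 1)
    %c = call i64 @llvm.riscv.sm4ed.i64(i64 %b, i64 %t, i8 2)
    %d = call i64 @llvm.riscv.sm4ed.i64(i64 %c, i64 %t, i8 3)
    ret i64 %d
  }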

diff  --git a/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll
new file mode 100644
index 0000000000000..8790ec1af24dd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zksh -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKSH
+
+declare i64 @llvm.riscv.sm3p0.i64(i64)
+
+define i64 @sm3p0_i64(i64 %a) nounwind {
+; RV64ZKSH-LABEL: sm3p0_i64:
+; RV64ZKSH:       # %bb.0:
+; RV64ZKSH-NEXT:    sm3p0 a0, a0
+; RV64ZKSH-NEXT:    ret
+  %val = call i64 @llvm.riscv.sm3p0.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sm3p1.i64(i64)
+
+define i64 @sm3p1_i64(i64 %a) nounwind {
+; RV64ZKSH-LABEL: sm3p1_i64:
+; RV64ZKSH:       # %bb.0:
+; RV64ZKSH-NEXT:    sm3p1 a0, a0
+; RV64ZKSH-NEXT:    ret
+  %val = call i64 @llvm.riscv.sm3p1.i64(i64 %a)
+  ret i64 %val
+}
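sm3p0 and sm3p1 implement the SM3 permutations P0 (used in the compression function) and P1 (used in message expansion). A sketch of the P1 core of one expansion step, with the rotations that feed it elided for brevity; the function and value names are illustrative:

  declare i64 @llvm.riscv.sm3p1.i64(i64)

  ; Core of SM3 message expansion: P1 applied to the XOR of three
  ; (pre-rotated) schedule words.
  define i64 @sm3_expand_core(i64 %w16, i64 %w9, i64 %w3r) nounwind {
    %x = xor i64 %w16, %w9
    %y = xor i64 %x, %w3r
    %p = call i64 @llvm.riscv.sm3p1.i64(i64 %y)
    ret i64 %p
  }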


        

