[llvm] [NVPTX] Use PRMT more widely, and improve folding around this instruction (PR #148261)

via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 11 10:15:10 PDT 2025


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-nvptx

Author: Alex MacLean (AlexMaclean)

<details>
<summary>Changes</summary>

Replace uses of BFE with PRMT when lowering v4i8 vectors. This will generally lead to equivalent or better SASS and reduces the number of target-specific operations we need to represent. (https://cuda.godbolt.org/z/M75W6f8xd) Also implement KnownBits tracking for PRMT, allowing elimination of redundant AND instructions when lowering various i8 operations.

---

Patch is 122.67 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/148261.diff


10 Files Affected:

- (modified) llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp (+52-34) 
- (modified) llvm/lib/Target/NVPTX/NVPTXISelLowering.h (+5-1) 
- (modified) llvm/lib/Target/NVPTX/NVPTXInstrInfo.td (+45-44) 
- (modified) llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll (+63-63) 
- (modified) llvm/test/CodeGen/NVPTX/extractelement.ll (+32-32) 
- (modified) llvm/test/CodeGen/NVPTX/i8x4-instructions.ll (+191-211) 
- (modified) llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll (+16-16) 
- (modified) llvm/test/CodeGen/NVPTX/ldg-invariant.ll (+12-12) 
- (modified) llvm/test/CodeGen/NVPTX/load-store-vectors.ll (+224-224) 
- (modified) llvm/test/CodeGen/NVPTX/sext-setcc.ll (+12-16) 


``````````diff
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index bb0aeb493ed48..efc0ab0972f30 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -57,6 +57,7 @@
 #include "llvm/Support/CodeGen.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
 #include "llvm/Support/NVPTXAddrSpace.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetMachine.h"
@@ -1070,7 +1071,6 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
     MAKE_CASE(NVPTXISD::StoreV8)
     MAKE_CASE(NVPTXISD::FSHL_CLAMP)
     MAKE_CASE(NVPTXISD::FSHR_CLAMP)
-    MAKE_CASE(NVPTXISD::BFE)
     MAKE_CASE(NVPTXISD::BFI)
     MAKE_CASE(NVPTXISD::PRMT)
     MAKE_CASE(NVPTXISD::FCOPYSIGN)
@@ -2145,14 +2145,15 @@ SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
   EVT VectorVT = Vector.getValueType();
 
   if (VectorVT == MVT::v4i8) {
-    SDValue BFE =
-        DAG.getNode(NVPTXISD::BFE, DL, MVT::i32,
-                    {Vector,
-                     DAG.getNode(ISD::MUL, DL, MVT::i32,
-                                 DAG.getZExtOrTrunc(Index, DL, MVT::i32),
-                                 DAG.getConstant(8, DL, MVT::i32)),
-                     DAG.getConstant(8, DL, MVT::i32)});
-    return DAG.getAnyExtOrTrunc(BFE, DL, Op->getValueType(0));
+    SDValue Selector = DAG.getNode(ISD::OR, DL, MVT::i32,
+      DAG.getZExtOrTrunc(Index, DL, MVT::i32),
+      DAG.getConstant(0x7770, DL, MVT::i32));
+    SDValue PRMT = DAG.getNode(
+        NVPTXISD::PRMT, DL, MVT::i32,
+        {DAG.getBitcast(MVT::i32, Vector), DAG.getConstant(0, DL, MVT::i32),
+         Selector,
+         DAG.getConstant(NVPTX::PTXPrmtMode::NONE, DL, MVT::i32)});
+    return DAG.getAnyExtOrTrunc(PRMT, DL, Op->getValueType(0));
   }
 
   // Constant index will be matched by tablegen.
@@ -5206,31 +5207,6 @@ static SDValue PerformANDCombine(SDNode *N,
 
   SDValue AExt;
 
-  // Convert BFE-> truncate i16 -> and 255
-  // To just BFE-> truncate i16, as the value already has all the bits in the
-  // right places.
-  if (Val.getOpcode() == ISD::TRUNCATE) {
-    SDValue BFE = Val.getOperand(0);
-    if (BFE.getOpcode() != NVPTXISD::BFE)
-      return SDValue();
-
-    ConstantSDNode *BFEBits = dyn_cast<ConstantSDNode>(BFE.getOperand(0));
-    if (!BFEBits)
-      return SDValue();
-    uint64_t BFEBitsVal = BFEBits->getZExtValue();
-
-    ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(Mask);
-    if (!MaskCnst) {
-      // Not an AND with a constant
-      return SDValue();
-    }
-    uint64_t MaskVal = MaskCnst->getZExtValue();
-
-    if (MaskVal != (uint64_t(1) << BFEBitsVal) - 1)
-      return SDValue();
-    // If we get here, the AND is unnecessary.  Just replace it with the trunc
-    DCI.CombineTo(N, Val, false);
-  }
   // Generally, we will see zextload -> IMOV16rr -> ANY_EXTEND -> and
   if (Val.getOpcode() == ISD::ANY_EXTEND) {
     AExt = Val;
@@ -6334,3 +6310,45 @@ MCSection *NVPTXTargetObjectFile::SelectSectionForGlobal(
     const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
   return getDataSection();
 }
+
+static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known,
+                                    const SelectionDAG &DAG, unsigned Depth) {
+  SDValue A = Op.getOperand(0);
+  SDValue B = Op.getOperand(1);
+  ConstantSDNode *Selector = dyn_cast<ConstantSDNode>(Op.getOperand(2));
+  unsigned Mode = Op.getConstantOperandVal(3);
+
+  if (Mode != NVPTX::PTXPrmtMode::NONE || !Selector)
+    return;
+
+  KnownBits AKnown = DAG.computeKnownBits(A, Depth);
+  KnownBits BKnown = DAG.computeKnownBits(B, Depth);
+
+  // {b, a} = {{b7, b6, b5, b4}, {b3, b2, b1, b0}}
+  KnownBits BitField = BKnown.concat(AKnown);
+
+  APInt SelectorVal = Selector->getAPIntValue();
+  for (unsigned I : llvm::seq(std::min(4U, Known.getBitWidth() / 8))) {
+    APInt Sel = SelectorVal.extractBits(4, I * 4);
+    unsigned Idx = Sel.getLoBits(3).getZExtValue();
+    unsigned Sign = Sel.getHiBits(1).getZExtValue();
+    KnownBits Byte = BitField.extractBits(8, Idx * 8);
+    if (Sign)
+      Byte = KnownBits::ashr(Byte, 8);
+    Known.insertBits(Byte, I * 8);
+  }
+}
+
+void NVPTXTargetLowering::computeKnownBitsForTargetNode(
+    const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
+    const SelectionDAG &DAG, unsigned Depth) const {
+  Known.resetAll();
+
+  switch (Op.getOpcode()) {
+  case NVPTXISD::PRMT:
+    computeKnownBitsForPRMT(Op, Known, DAG, Depth);
+    break;
+  default:
+    break;
+  }
+}
\ No newline at end of file
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
index 2477e1fb61595..bc3548c0272bb 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -50,7 +50,6 @@ enum NodeType : unsigned {
   MUL_WIDE_UNSIGNED,
   SETP_F16X2,
   SETP_BF16X2,
-  BFE,
   BFI,
   PRMT,
 
@@ -272,6 +271,11 @@ class NVPTXTargetLowering : public TargetLowering {
   unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT,
                                      EVT ToVT) const override;
 
+  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
+                                     const APInt &DemandedElts,
+                                     const SelectionDAG &DAG,
+                                     unsigned Depth = 0) const override;
+
 private:
   const NVPTXSubtarget &STI; // cache the subtarget here
   mutable unsigned GlobalUniqueCallSite;
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index dcdebb81e3c86..6913b68453574 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -1359,11 +1359,6 @@ def BREV64 :
 // restriction in PTX?
 //
 // dest and src may be int32 or int64, but start and end are always int32.
-def SDTBFE :
-  SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>,
-                       SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
-def bfe : SDNode<"NVPTXISD::BFE", SDTBFE>;
-
 def SDTBFI :
   SDTypeProfile<1, 4, [SDTCisInt<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, 
                        SDTCisVT<3, i32>, SDTCisVT<4, i32>]>;
@@ -1374,22 +1369,13 @@ def SDTPRMT :
                        SDTCisVT<2, i32>, SDTCisVT<3, i32>, SDTCisVT<4, i32>]>;
 def prmt : SDNode<"NVPTXISD::PRMT", SDTPRMT>;
 
-multiclass BFE<string Instr, ValueType T, RegisterClass RC> {
+multiclass BFE<string Instr, RegisterClass RC> {
   def rrr
-    : BasicNVPTXInst<(outs RC:$d),
-                (ins RC:$a, B32:$b, B32:$c),
-                Instr,
-                [(set T:$d, (bfe T:$a, i32:$b, i32:$c))]>;
+    : BasicNVPTXInst<(outs RC:$d), (ins RC:$a, B32:$b, B32:$c), Instr>;
   def rri
-    : BasicNVPTXInst<(outs RC:$d),
-                (ins RC:$a, B32:$b, i32imm:$c),
-                Instr,
-                [(set T:$d, (bfe T:$a, i32:$b, imm:$c))]>;
+    : BasicNVPTXInst<(outs RC:$d), (ins RC:$a, B32:$b, i32imm:$c), Instr>;
   def rii
-    : BasicNVPTXInst<(outs RC:$d),
-                (ins RC:$a, i32imm:$b, i32imm:$c),
-                Instr,
-                [(set T:$d, (bfe T:$a, imm:$b, imm:$c))]>;
+    : BasicNVPTXInst<(outs RC:$d), (ins RC:$a, i32imm:$b, i32imm:$c), Instr>;
 }
 
 multiclass BFI<string Instr, ValueType T, RegisterClass RC, Operand ImmCls> {
@@ -1434,10 +1420,10 @@ let hasSideEffects = false in {
   // the same patterns, so the first one wins. Having unsigned byte extraction
   // has the benefit of always having zero in unused bits, which makes some
   // optimizations easier (e.g. no need to mask them).
-  defm BFE_U32 : BFE<"bfe.u32", i32, B32>;
-  defm BFE_S32 : BFE<"bfe.s32", i32, B32>;
-  defm BFE_U64 : BFE<"bfe.u64", i64, B64>;
-  defm BFE_S64 : BFE<"bfe.s64", i64, B64>;
+  defm BFE_U32 : BFE<"bfe.u32", B32>;
+  defm BFE_S32 : BFE<"bfe.s32", B32>;
+  defm BFE_U64 : BFE<"bfe.u64", B64>;
+  defm BFE_S64 : BFE<"bfe.s64", B64>;
 
   defm BFI_B32 : BFI<"bfi.b32", i32, B32, i32imm>;
   defm BFI_B64 : BFI<"bfi.b64", i64, B64, i64imm>;
@@ -1474,19 +1460,26 @@ def : Pat<(fshr i32:$hi, i32:$lo, (shl i32:$amt, (i32 3))),
           (PRMT_B32rrr $lo, $hi, $amt, PrmtF4E)>;
 
 
+def byte_extract_prmt : ImmLeaf<i32, [{
+  return (Imm == 0x7770) || (Imm == 0x7771) || (Imm == 0x7772) || (Imm == 0x7773);
+}]>;
+
+def to_sign_extend_selector : SDNodeXForm<imm, [{
+  const APInt &V = N->getAPIntValue();
+  const APInt B = V.trunc(4);
+  const APInt BSext = B | 8;
+  const APInt R = BSext.concat(BSext).concat(BSext).concat(B).zext(32);
+  return CurDAG->getTargetConstant(R, SDLoc(N), MVT::i32);
+}]>;
+
+
 // byte extraction + signed/unsigned extension to i32.
-def : Pat<(i32 (sext_inreg (bfe i32:$s, i32:$o, 8), i8)),
-          (BFE_S32rri $s, $o, 8)>;
-def : Pat<(i32 (sext_inreg (bfe i32:$s, imm:$o, 8), i8)),
-          (BFE_S32rii $s, imm:$o, 8)>;
-def : Pat<(i32 (and (bfe i32:$s, i32:$o, 8), 255)),
-          (BFE_U32rri $s, $o, 8)>;
-def : Pat<(i32 (and (bfe i32:$s, imm:$o, 8), 255)),
-          (BFE_U32rii $s, imm:$o, 8)>;
+def : Pat<(i32 (sext_inreg (prmt i32:$s, 0, byte_extract_prmt:$sel, PrmtNONE), i8)),
+          (PRMT_B32rii $s, 0, (to_sign_extend_selector $sel), PrmtNONE)>;
 
 // byte extraction + signed extension to i16
-def : Pat<(i16 (sext_inreg (trunc (bfe i32:$s, imm:$o, 8)), i8)),
-          (CVT_s8_s32 (BFE_S32rii $s, imm:$o, 8), CvtNONE)>;
+def : Pat<(i16 (sext_inreg (trunc (prmt i32:$s, 0, byte_extract_prmt:$sel, PrmtNONE)), i8)),
+          (CVT_u16_u32 (PRMT_B32rii $s, 0, (to_sign_extend_selector $sel), PrmtNONE), CvtNONE)>;
 
 
 // Byte extraction via shift/trunc/sext
@@ -1699,25 +1692,33 @@ def cond_not_signed : PatLeaf<(cond), [{
 // comparisons of i8 extracted with BFE as i32
 // It's faster to do comparison directly on i32 extracted by BFE,
 // instead of the long conversion and sign extending.
-def: Pat<(setcc (i16 (sext_inreg (i16 (trunc (bfe B32:$a, B32:$oa, 8))), i8)),
-                (i16 (sext_inreg (i16 (trunc (bfe B32:$b, B32:$ob, 8))), i8)),
+def: Pat<(setcc (i16 (sext_inreg (i16 (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE))), i8)),
+                (i16 (sext_inreg (i16 (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE))), i8)),
                 cond_signed:$cc),
-         (SETP_i32rr (BFE_S32rri $a, $oa, 8), (BFE_S32rri $b, $ob, 8), (cond2cc $cc))>;
+         (SETP_i32rr (PRMT_B32rii i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE),
+                     (PRMT_B32rii i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE), 
+                     (cond2cc $cc))>;
 
-def: Pat<(setcc (i16 (sext_inreg (trunc (bfe B32:$a, imm:$oa, 8)), i8)),
-                (i16 (sext_inreg (trunc (bfe B32:$b, imm:$ob, 8)), i8)),
+def: Pat<(setcc (i16 (sext_inreg (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE)), i8)),
+                (i16 (sext_inreg (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE)), i8)),
                 cond_signed:$cc),
-         (SETP_i32rr (BFE_S32rii $a, imm:$oa, 8), (BFE_S32rii $b, imm:$ob, 8), (cond2cc $cc))>;
+         (SETP_i32rr (PRMT_B32rii i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE),
+                     (PRMT_B32rii i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE), 
+                     (cond2cc $cc))>;
 
-def: Pat<(setcc (i16 (and (trunc (bfe B32:$a, B32:$oa, 8)), 255)),
-                (i16 (and (trunc (bfe B32:$b, B32:$ob, 8)), 255)),
+def: Pat<(setcc (i16 (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE))),
+                (i16 (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE))),
                 cond_signed:$cc),
-         (SETP_i32rr (BFE_U32rri $a, $oa, 8), (BFE_U32rri $b, $ob, 8), (cond2cc $cc))>;
+         (SETP_i32rr (PRMT_B32rii i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE),
+                     (PRMT_B32rii i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE),
+                     (cond2cc $cc))>;
 
-def: Pat<(setcc (i16 (and (trunc (bfe B32:$a, imm:$oa, 8)), 255)),
-                (i16 (and (trunc (bfe B32:$b, imm:$ob, 8)), 255)),
+def: Pat<(setcc (i16 (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE))),
+                (i16 (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE))),
                 cond_not_signed:$cc),
-         (SETP_i32rr (BFE_U32rii $a, imm:$oa, 8), (BFE_U32rii $b, imm:$ob, 8), (cond2cc $cc))>;
+         (SETP_i32rr (PRMT_B32rii i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE),
+                     (PRMT_B32rii i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE), 
+                     (cond2cc $cc))>;
 
 def SDTDeclareArrayParam :
   SDTypeProfile<0, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, i32>]>;
diff --git a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
index 1207c429524ca..23832a9cb5c58 100644
--- a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
+++ b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
@@ -178,38 +178,38 @@ define void @combine_v16i8(ptr noundef align 16 %ptr1, ptr noundef align 16 %ptr
 ; ENABLED-NEXT:  // %bb.0:
 ; ENABLED-NEXT:    ld.param.b64 %rd1, [combine_v16i8_param_0];
 ; ENABLED-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; ENABLED-NEXT:    prmt.b32 %r5, %r4, 0, 0x7773U;
+; ENABLED-NEXT:    prmt.b32 %r6, %r4, 0, 0x7772U;
+; ENABLED-NEXT:    prmt.b32 %r7, %r4, 0, 0x7771U;
+; ENABLED-NEXT:    prmt.b32 %r8, %r4, 0, 0x7770U;
+; ENABLED-NEXT:    prmt.b32 %r9, %r3, 0, 0x7773U;
+; ENABLED-NEXT:    prmt.b32 %r10, %r3, 0, 0x7772U;
+; ENABLED-NEXT:    prmt.b32 %r11, %r3, 0, 0x7771U;
+; ENABLED-NEXT:    prmt.b32 %r12, %r3, 0, 0x7770U;
+; ENABLED-NEXT:    prmt.b32 %r13, %r2, 0, 0x7773U;
+; ENABLED-NEXT:    prmt.b32 %r14, %r2, 0, 0x7772U;
+; ENABLED-NEXT:    prmt.b32 %r15, %r2, 0, 0x7771U;
+; ENABLED-NEXT:    prmt.b32 %r16, %r2, 0, 0x7770U;
+; ENABLED-NEXT:    prmt.b32 %r17, %r1, 0, 0x7773U;
+; ENABLED-NEXT:    prmt.b32 %r18, %r1, 0, 0x7772U;
+; ENABLED-NEXT:    prmt.b32 %r19, %r1, 0, 0x7771U;
+; ENABLED-NEXT:    prmt.b32 %r20, %r1, 0, 0x7770U;
 ; ENABLED-NEXT:    ld.param.b64 %rd2, [combine_v16i8_param_1];
-; ENABLED-NEXT:    bfe.u32 %r5, %r1, 0, 8;
-; ENABLED-NEXT:    bfe.u32 %r6, %r1, 8, 8;
-; ENABLED-NEXT:    bfe.u32 %r7, %r1, 16, 8;
-; ENABLED-NEXT:    bfe.u32 %r8, %r1, 24, 8;
-; ENABLED-NEXT:    bfe.u32 %r9, %r2, 0, 8;
-; ENABLED-NEXT:    bfe.u32 %r10, %r2, 8, 8;
-; ENABLED-NEXT:    bfe.u32 %r11, %r2, 16, 8;
-; ENABLED-NEXT:    bfe.u32 %r12, %r2, 24, 8;
-; ENABLED-NEXT:    bfe.u32 %r13, %r3, 0, 8;
-; ENABLED-NEXT:    bfe.u32 %r14, %r3, 8, 8;
-; ENABLED-NEXT:    bfe.u32 %r15, %r3, 16, 8;
-; ENABLED-NEXT:    bfe.u32 %r16, %r3, 24, 8;
-; ENABLED-NEXT:    bfe.u32 %r17, %r4, 0, 8;
-; ENABLED-NEXT:    bfe.u32 %r18, %r4, 8, 8;
-; ENABLED-NEXT:    bfe.u32 %r19, %r4, 16, 8;
-; ENABLED-NEXT:    bfe.u32 %r20, %r4, 24, 8;
-; ENABLED-NEXT:    add.s32 %r21, %r5, %r6;
-; ENABLED-NEXT:    add.s32 %r22, %r21, %r7;
-; ENABLED-NEXT:    add.s32 %r23, %r22, %r8;
-; ENABLED-NEXT:    add.s32 %r24, %r23, %r9;
-; ENABLED-NEXT:    add.s32 %r25, %r24, %r10;
-; ENABLED-NEXT:    add.s32 %r26, %r25, %r11;
-; ENABLED-NEXT:    add.s32 %r27, %r26, %r12;
-; ENABLED-NEXT:    add.s32 %r28, %r27, %r13;
-; ENABLED-NEXT:    add.s32 %r29, %r28, %r14;
-; ENABLED-NEXT:    add.s32 %r30, %r29, %r15;
-; ENABLED-NEXT:    add.s32 %r31, %r30, %r16;
-; ENABLED-NEXT:    add.s32 %r32, %r31, %r17;
-; ENABLED-NEXT:    add.s32 %r33, %r32, %r18;
-; ENABLED-NEXT:    add.s32 %r34, %r33, %r19;
-; ENABLED-NEXT:    add.s32 %r35, %r34, %r20;
+; ENABLED-NEXT:    add.s32 %r21, %r20, %r19;
+; ENABLED-NEXT:    add.s32 %r22, %r21, %r18;
+; ENABLED-NEXT:    add.s32 %r23, %r22, %r17;
+; ENABLED-NEXT:    add.s32 %r24, %r23, %r16;
+; ENABLED-NEXT:    add.s32 %r25, %r24, %r15;
+; ENABLED-NEXT:    add.s32 %r26, %r25, %r14;
+; ENABLED-NEXT:    add.s32 %r27, %r26, %r13;
+; ENABLED-NEXT:    add.s32 %r28, %r27, %r12;
+; ENABLED-NEXT:    add.s32 %r29, %r28, %r11;
+; ENABLED-NEXT:    add.s32 %r30, %r29, %r10;
+; ENABLED-NEXT:    add.s32 %r31, %r30, %r9;
+; ENABLED-NEXT:    add.s32 %r32, %r31, %r8;
+; ENABLED-NEXT:    add.s32 %r33, %r32, %r7;
+; ENABLED-NEXT:    add.s32 %r34, %r33, %r6;
+; ENABLED-NEXT:    add.s32 %r35, %r34, %r5;
 ; ENABLED-NEXT:    st.b32 [%rd2], %r35;
 ; ENABLED-NEXT:    ret;
 ;
@@ -329,39 +329,39 @@ define void @combine_v16i8_unaligned(ptr noundef align 8 %ptr1, ptr noundef alig
 ; ENABLED-NEXT:  // %bb.0:
 ; ENABLED-NEXT:    ld.param.b64 %rd1, [combine_v16i8_unaligned_param_0];
 ; ENABLED-NEXT:    ld.v2.b32 {%r1, %r2}, [%rd1];
+; ENABLED-NEXT:    prmt.b32 %r3, %r2, 0, 0x7773U;
+; ENABLED-NEXT:    prmt.b32 %r4, %r2, 0, 0x7772U;
+; ENABLED-NEXT:    prmt.b32 %r5, %r2, 0, 0x7771U;
+; ENABLED-NEXT:    prmt.b32 %r6, %r2, 0, 0x7770U;
+; ENABLED-NEXT:    prmt.b32 %r7, %r1, 0, 0x7773U;
+; ENABLED-NEXT:    prmt.b32 %r8, %r1, 0, 0x7772U;
+; ENABLED-NEXT:    prmt.b32 %r9, %r1, 0, 0x7771U;
+; ENABLED-NEXT:    prmt.b32 %r10, %r1, 0, 0x7770U;
 ; ENABLED-NEXT:    ld.param.b64 %rd2, [combine_v16i8_unaligned_param_1];
-; ENABLED-NEXT:    ld.v2.b32 {%r3, %r4}, [%rd1+8];
-; ENABLED-NEXT:    bfe.u32 %r5, %r1, 0, 8;
-; ENABLED-NEXT:    bfe.u32 %r6, %r1, 8, 8;
-; ENABLED-NEXT:    bfe.u32 %r7, %r1, 16, 8;
-; ENABLED-NEXT:    bfe.u32 %r8, %r1, 24, 8;
-; ENABLED-NEXT:    bfe.u32 %r9, %r2, 0, 8;
-; ENABLED-NEXT:    bfe.u32 %r10, %r2, 8, 8;
-; ENABLED-NEXT:    bfe.u32 %r11, %r2, 16, 8;
-; ENABLED-NEXT:    bfe.u32 %r12, %r2, 24, 8;
-; ENABLED-NEXT:    bfe.u32 %r13, %r3, 0, 8;
-; ENABLED-NEXT:    bfe.u32 %r14, %r3, 8, 8;
-; ENABLED-NEXT:    bfe.u32 %r15, %r3, 16, 8;
-; ENABLED-NEXT:    bfe.u32 %r16, %r3, 24, 8;
-; ENABLED-NEXT:    bfe.u32 %r17, %r4, 0, 8;
-; ENABLED-NEXT:    bfe.u32 %r18, %r4, 8, 8;
-; ENABLED-NEXT:    bfe.u32 %r19, %r4, 16, 8;
-; ENABLED-NEXT:    bfe.u32 %r20, %r4, 24, 8;
-; ENABLED-NEXT:    add.s32 %r21, %r5, %r6;
-; ENABLED-NEXT:    add.s32 %r22, %r21, %r7;
-; ENABLED-NEXT:    add.s32 %r23, %r22, %r8;
-; ENABLED-NEXT:    add.s32 %r24, %r23, %r9;
-; ENABLED-NEXT:    add.s32 %r25, %r24, %r10;
-; ENABLED-NEXT:    add.s32 %r26, %r25, %r11;
-; ENABLED-NEXT:    add.s32 %r27, %r26, %r12;
-; ENABLED-NEXT:    add.s32 %r28, %r27, %r13;
-; ENABLED-NEXT:    add.s32 %r29, %r28, %r14;
-; ENABLED-NEXT:    add.s32 %r30, %r29, %r15;
-; ENABLED-NEXT:    add.s32 %r31, %r30, %r16;
-; ENABLED-NEXT:    add.s32 %r32, %r31, %r17;
-; ENABLED-NEXT:    add.s32 %r33, %r32, %r18;
-; ENABLED-NEXT:    add.s32 %r34, %r33, %r19;
-; ENABLED-NEXT:    add.s32 %r35, %r34, %r20;
+; ENABLED-NEXT:    ld.v2.b32 {%r11, %r12}, [%rd1+8];
+; ENABLED-NEXT:    prmt.b32 %r13, %r12, 0, 0x7773U;
+; ENABLED-NEXT:    prmt.b32 %r14, %r12, 0, 0x7772U;
+; ENABLED-NEXT:    prmt.b32 %r15, %r12, 0, 0x7771U;
+; ENABLED-NEXT:    prmt.b32 %r16, %r12, 0, 0x7770U;
+; ENABLED-NEXT:    prmt.b32 %r17, %r11, 0, 0x7773U;
+; ENABLED-NEXT:    prmt.b32 %r18, %r11, 0, 0x7772U;
+; ENABLED-NEXT:    prmt.b32 %r19, %r11, 0, 0x7771U;
+; ENABLED-NEXT:    prmt.b32 %r20, %r11, 0, 0x7770U;
+; ENABLED-NEXT:    add.s32 %r21, %r10, %r9;
+; ENABLED-NEXT:    add.s32 %r22, %r21, %r8;
+; ENABLED-NEXT:    add.s32 %r23, %r22, %r7;
+; ENABLED-NEXT:    add.s32 %r24, %r23, %r6;
+; ENABLED-NEXT:    add.s32 %r25, %r24, %r5;
+; ENABLED-NEXT:    add.s32 %r26, %r25, %r4;
+; ENABLED-NEXT:    add.s32 %r27, %r26, %r3;
+; ENABLED-NEXT:    add.s32 %r28, %r27, %r20;
+; ENABLED-NEXT:    add.s32 %r29, %r28, %r19;
+; ENABLED-NEXT:    add.s32 %r30, %r29, %r18;
+; ENABLED-NEXT:    add.s32 %r31, %r30, %r17;
+; ENABLED-NEXT:    add.s32 %r32, %r31, %r16;
+; ENABLED-NEXT:    add.s32 %r33, %r32, %r15;
+; ENABLED-NEXT:    add.s32 %r34, %r33, %r14;
+; ENABLED-NEXT:    add.s32 %r35, %r34, %r13;
 ; ENABLED-NEXT:    st.b32 [%rd2], %r35;
 ; ENABLED-NEXT:    ret;
 ;
diff --git a/llvm/test/CodeGen/NVPTX/extractelement.ll b/llvm/test/CodeGen/NVPTX/extractelement.ll
index e04732ebad66b..80980efbab05b 100644
--- a/llvm/test/CodeGen/NVPTX/extractelement.ll
+++ b/llvm/test/CodeGen/NVPTX/extractelement.ll
@@ -60,14 +60,14 @@ define i16  @test_v4i8(i32 %a) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b32 %r1, [test_v4i8_param_0];
-; CHECK-NEXT:    bfe.s32 %r2, %r1, 0, 8;
-; CHECK-NEXT:    cvt.s8.s32 %rs1, %r2;
-; CHECK-NEXT:    bfe.s32 %r3, %r1, 8, 8;
-; CHECK-NEXT:    cvt.s8.s32 %rs2, %r3;
-; CHECK-NEXT:    bfe.s32 %r4, %r1, 16, 8;
-; CHECK-NEXT:    cvt.s8.s32 %rs3, %r4;
-; CHECK-NEXT:    bfe.s32 %r5, %r1, 24, 8;
-; CHEC...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/148261


More information about the llvm-commits mailing list