[llvm] 6efdcc1 - [NVPTX] Fixup EXT_LOAD lowering for i128 values (#138049)

via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 30 20:51:59 PDT 2025


Author: Alex MacLean
Date: 2025-04-30T20:51:55-07:00
New Revision: 6efdcc188567fa2c9bde383e4397062f6d31427a

URL: https://github.com/llvm/llvm-project/commit/6efdcc188567fa2c9bde383e4397062f6d31427a
DIFF: https://github.com/llvm/llvm-project/commit/6efdcc188567fa2c9bde383e4397062f6d31427a.diff

LOG: [NVPTX] Fixup EXT_LOAD lowering for i128 values (#138049)

Ensure that, when custom lowering a vector load/store to a multi-output
load/store node, we confirm that the memory value type matches the value
type used by the node. Also add asserts to sanity-check that load and
store widths are powers of two between 8 and 128 bits.

Fixes https://github.com/llvm/llvm-project/issues/138034
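
For context: before this change, the vector-lowering path keyed only on a
load's result type, without checking that it matched the in-memory type, so
an extending load of a small value into an i128 (which NVPTX custom-lowers
as a pair of 64-bit halves) could be turned into a multi-output load wider
than the underlying memory. A minimal reproducer, mirroring the test added
below, is:

    define i128 @foo(ptr %p, ptr %o) {
      %c = load i8, ptr %p, align 1   ; memory type is i8 ...
      %i = zext i8 %c to i128         ; ... but the loaded value is i128
      store i128 %i, ptr %o, align 16
      ret i128 %i
    }

With the ResVT != MemVT (loads) and ValVT != MemVT (stores) bail-outs added
here, such extending loads and truncating stores fall back to the default
lowering, and the test now emits a plain ld.u8 for the byte load.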

Added: 
    llvm/test/CodeGen/NVPTX/i128-ld-st.ll

Modified: 
    llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
    llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 295ed666a1902..5c41ac261224d 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -26,6 +26,7 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/MathExtras.h"
 #include <optional>
 
 using namespace llvm;
@@ -1141,6 +1142,9 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
   else
     FromType = getLdStRegType(ScalarVT);
 
+  assert(isPowerOf2_32(FromTypeWidth) && FromTypeWidth >= 8 &&
+         FromTypeWidth <= 128 && "Invalid width for load");
+
   // Create the machine instruction DAG
   SDValue Offset, Base;
   SelectADDR(N->getOperand(1), Base, Offset);
@@ -1236,6 +1240,9 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
     FromType = NVPTX::PTXLdStInstCode::Untyped;
   }
 
+  assert(isPowerOf2_32(FromTypeWidth) && FromTypeWidth >= 8 &&
+         FromTypeWidth <= 128 && TotalWidth <= 128 && "Invalid width for load");
+
   SDValue Offset, Base;
   SelectADDR(N->getOperand(1), Base, Offset);
   SDValue Ops[] = {getI32Imm(Ordering, DL),
@@ -1453,6 +1460,9 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
   // Create the machine instruction DAG
   SDValue Value = PlainStore ? PlainStore->getValue() : AtomicStore->getVal();
 
+  assert(isPowerOf2_32(ToTypeWidth) && ToTypeWidth >= 8 && ToTypeWidth <= 128 &&
+         "Invalid width for store");
+
   SDValue Offset, Base;
   SelectADDR(ST->getBasePtr(), Base, Offset);
 
@@ -1537,6 +1547,9 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
     ToType = NVPTX::PTXLdStInstCode::Untyped;
   }
 
+  assert(isPowerOf2_32(ToTypeWidth) && ToTypeWidth >= 8 && ToTypeWidth <= 128 &&
+         TotalWidth <= 128 && "Invalid width for store");
+
   SDValue Offset, Base;
   SelectADDR(N2, Base, Offset);
 

diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index c41741ed10232..44bfd303fc861 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -3191,20 +3191,25 @@ SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
 
 SDValue
 NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
-  SDNode *N = Op.getNode();
+  MemSDNode *N = cast<MemSDNode>(Op.getNode());
   SDValue Val = N->getOperand(1);
   SDLoc DL(N);
-  EVT ValVT = Val.getValueType();
+  const EVT ValVT = Val.getValueType();
+  const EVT MemVT = N->getMemoryVT();
+
+  // If we're truncating as part of the store, avoid lowering to a StoreV node.
+  // TODO: consider relaxing this restriction.
+  if (ValVT != MemVT)
+    return SDValue();
 
   const auto NumEltsAndEltVT = getVectorLoweringShape(ValVT);
   if (!NumEltsAndEltVT)
     return SDValue();
   const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
 
-  MemSDNode *MemSD = cast<MemSDNode>(N);
   const DataLayout &TD = DAG.getDataLayout();
 
-  Align Alignment = MemSD->getAlign();
+  Align Alignment = N->getAlign();
   Align PrefAlign = TD.getPrefTypeAlign(ValVT.getTypeForEVT(*DAG.getContext()));
   if (Alignment < PrefAlign) {
     // This store is not sufficiently aligned, so bail out and let this vector
@@ -3267,7 +3272,7 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
 
   SDValue NewSt =
       DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
-                              MemSD->getMemoryVT(), MemSD->getMemOperand());
+                              N->getMemoryVT(), N->getMemOperand());
 
   // return DCI.CombineTo(N, NewSt, true);
   return NewSt;
@@ -5762,20 +5767,23 @@ static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG,
 /// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.
 static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &Results) {
-  const EVT ResVT = N->getValueType(0);
-  SDLoc DL(N);
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  const EVT ResVT = LD->getValueType(0);
+  const EVT MemVT = LD->getMemoryVT();
+
+  // If we're doing sign/zero extension as part of the load, avoid lowering to
+  // a LoadV node. TODO: consider relaxing this restriction.
+  if (ResVT != MemVT)
+    return;
 
   const auto NumEltsAndEltVT = getVectorLoweringShape(ResVT);
   if (!NumEltsAndEltVT)
     return;
   const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
 
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-
   Align Alignment = LD->getAlign();
   const auto &TD = DAG.getDataLayout();
-  Align PrefAlign =
-      TD.getPrefTypeAlign(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
+  Align PrefAlign = TD.getPrefTypeAlign(MemVT.getTypeForEVT(*DAG.getContext()));
   if (Alignment < PrefAlign) {
     // This load is not sufficiently aligned, so bail out and let this vector
     // load be scalarized.  Note that we may still be able to emit smaller
@@ -5806,9 +5814,10 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
     break;
   }
   }
+  SDLoc DL(LD);
 
   // Copy regular operands
-  SmallVector<SDValue, 8> OtherOps(N->ops());
+  SmallVector<SDValue, 8> OtherOps(LD->ops());
 
   // The select routine does not have access to the LoadSDNode instance, so
   // pass along the extension information

diff --git a/llvm/test/CodeGen/NVPTX/i128-ld-st.ll b/llvm/test/CodeGen/NVPTX/i128-ld-st.ll
new file mode 100644
index 0000000000000..41cffe9cdbf90
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/i128-ld-st.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -O0 -mcpu=sm_20 | FileCheck %s
+; RUN: %if ptxas %{ llc < %s -O0 -mcpu=sm_20 | %ptxas-verify %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+define i128 @foo(ptr %p, ptr %o) {
+; CHECK-LABEL: foo(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.u64 %rd2, [foo_param_1];
+; CHECK-NEXT:    ld.param.u64 %rd1, [foo_param_0];
+; CHECK-NEXT:    ld.u8 %rd3, [%rd1];
+; CHECK-NEXT:    mov.b64 %rd4, 0;
+; CHECK-NEXT:    st.v2.u64 [%rd2], {%rd3, %rd4};
+; CHECK-NEXT:    st.param.v2.b64 [func_retval0], {%rd3, %rd4};
+; CHECK-NEXT:    ret;
+  %c = load i8, ptr %p, align 1
+  %i = zext i8 %c to i128
+  store i128 %i, ptr %o, align 16
+  ret i128 %i
+}

More information about the llvm-commits mailing list