[llvm] 8129b6b - [NVPTX, InstCombine] instcombine known pointer AS checks. (#114325)

via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 31 09:24:55 PDT 2024


Author: Artem Belevich
Date: 2024-10-31T09:24:51-07:00
New Revision: 8129b6b53bf5a8f0cc2124764935761e74bbb5c8

URL: https://github.com/llvm/llvm-project/commit/8129b6b53bf5a8f0cc2124764935761e74bbb5c8
DIFF: https://github.com/llvm/llvm-project/commit/8129b6b53bf5a8f0cc2124764935761e74bbb5c8.diff

LOG: [NVPTX, InstCombine] instcombine known pointer AS checks. (#114325)

The change improves the generated code in general and, as a side effect,
avoids crashing on impossible address space casts guarded
by `__isGlobal`/`__isShared`, which partially fixes
https://github.com/llvm/llvm-project/issues/112760

It is still possible to trigger the issue with explicit AS casts that are not
guarded by AS checks, but LLVM should no longer crash on valid code.
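
For illustration, a minimal CUDA sketch of the kind of guarded check that now
folds to a constant (hypothetical code, not the reproducer from the issue
above; the function name is made up, and it assumes a launch with at most 32
threads per block):

  __global__ void guarded_copy(int *out) {
    __shared__ int buf[32];
    buf[threadIdx.x] = (int)threadIdx.x;
    __syncthreads();
    int *p = buf; // addrspacecast from addrspace(3) to generic, visible to InstCombine
    // __isShared() lowers to llvm.nvvm.isspacep.shared; with this patch the
    // check folds to 'true', so the else branch becomes dead code, together
    // with any impossible address space cast it may have guarded.
    if (__isShared(p))
      out[threadIdx.x] = p[threadIdx.x];
    else
      out[threadIdx.x] = -1; // unreachable once the check is folded
  }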

This is #112964 plus a small fix for the crash on unintended argument
access, which was the reason the earlier version of the patch was reverted.

Added: 
    llvm/include/llvm/Support/NVPTXAddrSpace.h
    llvm/test/Transforms/InstCombine/NVPTX/isspacep.ll

Modified: 
    llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
    llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Support/NVPTXAddrSpace.h b/llvm/include/llvm/Support/NVPTXAddrSpace.h
new file mode 100644
index 00000000000000..93eae39e3d2305
--- /dev/null
+++ b/llvm/include/llvm/Support/NVPTXAddrSpace.h
@@ -0,0 +1,33 @@
+//===---------------- NVPTXAddrSpace.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// NVPTX address space definition
+///
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_NVPTXADDRSPACE_H
+#define LLVM_SUPPORT_NVPTXADDRSPACE_H
+
+namespace llvm {
+namespace NVPTXAS {
+enum AddressSpace : unsigned {
+  ADDRESS_SPACE_GENERIC = 0,
+  ADDRESS_SPACE_GLOBAL = 1,
+  ADDRESS_SPACE_SHARED = 3,
+  ADDRESS_SPACE_CONST = 4,
+  ADDRESS_SPACE_LOCAL = 5,
+
+  ADDRESS_SPACE_PARAM = 101,
+};
+} // end namespace NVPTXAS
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_NVPTXADDRSPACE_H

diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
index 815b600fe93a9f..d06e2c00ec3f96 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
@@ -16,18 +16,10 @@
 #ifndef LLVM_LIB_TARGET_NVPTX_MCTARGETDESC_NVPTXBASEINFO_H
 #define LLVM_LIB_TARGET_NVPTX_MCTARGETDESC_NVPTXBASEINFO_H
 
+#include "llvm/Support/NVPTXAddrSpace.h"
 namespace llvm {
 
-enum AddressSpace {
-  ADDRESS_SPACE_GENERIC = 0,
-  ADDRESS_SPACE_GLOBAL = 1,
-  ADDRESS_SPACE_SHARED = 3,
-  ADDRESS_SPACE_CONST = 4,
-  ADDRESS_SPACE_LOCAL = 5,
-
-  // NVVM Internal
-  ADDRESS_SPACE_PARAM = 101
-};
+using namespace NVPTXAS;
 
 namespace NVPTXII {
 enum {

diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
index e35ba25b47880f..3507573df1869f 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
@@ -15,10 +15,12 @@
 #include "llvm/CodeGen/CostTable.h"
 #include "llvm/CodeGen/TargetLowering.h"
 #include "llvm/IR/Constants.h"
+#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/IntrinsicsNVPTX.h"
 #include "llvm/IR/Value.h"
 #include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Transforms/InstCombine/InstCombiner.h"
 #include <optional>
 using namespace llvm;
@@ -117,7 +119,8 @@ bool NVPTXTTIImpl::isSourceOfDivergence(const Value *V) {
 }
 
 // Convert NVVM intrinsics to target-generic LLVM code where possible.
-static Instruction *simplifyNvvmIntrinsic(IntrinsicInst *II, InstCombiner &IC) {
+static Instruction *convertNvvmIntrinsicToLlvm(InstCombiner &IC,
+                                               IntrinsicInst *II) {
   // Each NVVM intrinsic we can simplify can be replaced with one of:
   //
   //  * an LLVM intrinsic,
@@ -413,11 +416,65 @@ static Instruction *simplifyNvvmIntrinsic(IntrinsicInst *II, InstCombiner &IC) {
   llvm_unreachable("All SpecialCase enumerators should be handled in switch.");
 }
 
+// Returns an instruction pointer (may be nullptr if we do not know the answer).
+// Returns nullopt if `II` is not one of the `isspacep` intrinsics.
+static std::optional<Instruction *>
+handleSpaceCheckIntrinsics(InstCombiner &IC, IntrinsicInst &II) {
+  // Returns true/false when we know the answer, nullopt otherwise.
+  auto CheckASMatch = [](unsigned IID, unsigned AS) -> std::optional<bool> {
+    if (AS == NVPTXAS::ADDRESS_SPACE_GENERIC ||
+        AS == NVPTXAS::ADDRESS_SPACE_PARAM)
+      return std::nullopt; // Got to check at run-time.
+    switch (IID) {
+    case Intrinsic::nvvm_isspacep_global:
+      return AS == NVPTXAS::ADDRESS_SPACE_GLOBAL;
+    case Intrinsic::nvvm_isspacep_local:
+      return AS == NVPTXAS::ADDRESS_SPACE_LOCAL;
+    case Intrinsic::nvvm_isspacep_shared:
+      return AS == NVPTXAS::ADDRESS_SPACE_SHARED;
+    case Intrinsic::nvvm_isspacep_shared_cluster:
+      // We can't tell shared from shared_cluster at compile time from AS alone,
+      // but it can't be either if AS is not shared.
+      return AS == NVPTXAS::ADDRESS_SPACE_SHARED ? std::nullopt
+                                                 : std::optional{false};
+    case Intrinsic::nvvm_isspacep_const:
+      return AS == NVPTXAS::ADDRESS_SPACE_CONST;
+    default:
+      llvm_unreachable("Unexpected intrinsic");
+    }
+  };
+
+  switch (auto IID = II.getIntrinsicID()) {
+  case Intrinsic::nvvm_isspacep_global:
+  case Intrinsic::nvvm_isspacep_local:
+  case Intrinsic::nvvm_isspacep_shared:
+  case Intrinsic::nvvm_isspacep_shared_cluster:
+  case Intrinsic::nvvm_isspacep_const: {
+    Value *Op0 = II.getArgOperand(0);
+    unsigned AS = Op0->getType()->getPointerAddressSpace();
+    // Peek through ASC to generic AS.
+    // TODO: we could dig deeper through both ASCs and GEPs.
+    if (AS == NVPTXAS::ADDRESS_SPACE_GENERIC)
+      if (auto *ASCO = dyn_cast<AddrSpaceCastOperator>(Op0))
+        AS = ASCO->getOperand(0)->getType()->getPointerAddressSpace();
+
+    if (std::optional<bool> Answer = CheckASMatch(IID, AS))
+      return IC.replaceInstUsesWith(II,
+                                    ConstantInt::get(II.getType(), *Answer));
+    return nullptr; // Don't know the answer, got to check at run time.
+  }
+  default:
+    return std::nullopt;
+  }
+}
+
 std::optional<Instruction *>
 NVPTXTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
-  if (Instruction *I = simplifyNvvmIntrinsic(&II, IC)) {
+  if (std::optional<Instruction *> I = handleSpaceCheckIntrinsics(IC, II))
+    return *I;
+  if (Instruction *I = convertNvvmIntrinsicToLlvm(IC, &II))
     return I;
-  }
+
   return std::nullopt;
 }
 

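For the opposite case, where nothing about the pointer's origin is visible, the
check is deliberately left in place and answered at run time; the first case in
each function of the new test below exercises exactly that. A minimal CUDA
sketch of such a situation (hypothetical, function name made up for
illustration):

  __device__ int runtime_check(int *p) { // origin of 'p' is unknown here
    // No addrspacecast is visible, so llvm.nvvm.isspacep.shared cannot be
    // folded and remains a run-time query.
    return __isShared(p) ? 1 : 0;
  }
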
diff --git a/llvm/test/Transforms/InstCombine/NVPTX/isspacep.ll b/llvm/test/Transforms/InstCombine/NVPTX/isspacep.ll
new file mode 100644
index 00000000000000..dedd85e1a8cda8
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/NVPTX/isspacep.ll
@@ -0,0 +1,277 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=instcombine -mtriple=nvptx64-nvidia-cuda -S | FileCheck %s
+target datalayout = "e-i64:64-i128:128-v16:16-v32:32-n16:32:64"
+target triple = "nvptx64-nvidia-cuda"
+
+; Source data in different AS.
+ at shared_data = dso_local addrspace(3) global i32 undef, align 4
+ at global_data = dso_local addrspace(1) externally_initialized global i32 0, align 4
+ at const_data = dso_local addrspace(4) externally_initialized constant i32 3, align 4
+
+; Results get stored here.
+ at gen = dso_local addrspace(1) externally_initialized global i8 0, align 1
+ at g1 = dso_local addrspace(1) externally_initialized global i8 0, align 1
+ at g2 = dso_local addrspace(1) externally_initialized global i8 0, align 1
+ at s1 = dso_local addrspace(1) externally_initialized global i8 0, align 1
+ at s2 = dso_local addrspace(1) externally_initialized global i8 0, align 1
+ at c1 = dso_local addrspace(1) externally_initialized global i8 0, align 1
+ at c2 = dso_local addrspace(1) externally_initialized global i8 0, align 1
+ at l = dso_local addrspace(1) externally_initialized global i8 0, align 1
+
+declare i1 @llvm.nvvm.isspacep.global(ptr nocapture)
+declare i1 @llvm.nvvm.isspacep.shared(ptr nocapture)
+declare i1 @llvm.nvvm.isspacep.const(ptr nocapture)
+declare i1 @llvm.nvvm.isspacep.local(ptr nocapture)
+
+define dso_local void @check_global(ptr nocapture noundef readnone %out, ptr nocapture noundef readnone %genp,
+; CHECK-LABEL: define dso_local void @check_global(
+; CHECK-SAME: ptr nocapture noundef readnone [[OUT:%.*]], ptr nocapture noundef readnone [[GENP:%.*]], ptr addrspace(1) [[GP:%.*]], ptr addrspace(3) [[SP:%.*]], ptr addrspace(4) [[CP:%.*]], ptr addrspace(5) [[LP:%.*]]) local_unnamed_addr {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[GEN0:%.*]] = tail call i1 @llvm.nvvm.isspacep.global(ptr [[GENP]])
+; CHECK-NEXT:    [[STOREDV:%.*]] = zext i1 [[GEN0]] to i8
+; CHECK-NEXT:    store i8 [[STOREDV]], ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+; CHECK-NEXT:    store i8 1, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+; CHECK-NEXT:    store i8 1, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+; CHECK-NEXT:    ret void
+;
+  ptr addrspace(1) %gp,
+  ptr addrspace(3) %sp,
+  ptr addrspace(4) %cp,
+  ptr addrspace(5) %lp) local_unnamed_addr {
+entry:
+  ; No constant folding for generic pointers of unknown origin.
+  %gen0 = tail call i1 @llvm.nvvm.isspacep.global(ptr %genp)
+  %storedv = zext i1 %gen0 to i8
+  store i8 %storedv, ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+
+  %isg1 = tail call i1 @llvm.nvvm.isspacep.global(ptr addrspacecast (ptr addrspace(1) @global_data to ptr))
+  %isg18 = zext i1 %isg1 to i8
+  store i8 %isg18, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+
+  %gp_asc = addrspacecast ptr addrspace(1) %gp to ptr
+  %isg2 = tail call i1 @llvm.nvvm.isspacep.global(ptr %gp_asc)
+  %isg28 = zext i1 %isg2 to i8
+  store i8 %isg28, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+
+  %iss1 = tail call i1 @llvm.nvvm.isspacep.global(ptr addrspacecast (ptr addrspace(3) @shared_data to ptr))
+  %iss18 = zext i1 %iss1 to i8
+  store i8 %iss18, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+
+  %sp_asc = addrspacecast ptr addrspace(3) %sp to ptr
+  %iss2 = tail call i1 @llvm.nvvm.isspacep.global(ptr %sp_asc)
+  %iss28 = zext i1 %iss2 to i8
+  store i8 %iss28, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+
+  %isc1 = tail call i1 @llvm.nvvm.isspacep.global(ptr addrspacecast (ptr addrspace(4) @const_data to ptr))
+  %isc18 = zext i1 %isc1 to i8
+  store i8 %isc18, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+
+  %cp_asc = addrspacecast ptr addrspace(4) %cp to ptr
+  %isc2 = tail call i1 @llvm.nvvm.isspacep.global(ptr %cp_asc)
+  %isc28 = zext i1 %isc2 to i8
+  store i8 %isc28, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+
+  ; Local data can't have a constant address, so we can't have a constant ASC expression
+  ; We can only use an ASC instruction.
+  %lp_asc = addrspacecast ptr addrspace(5) %lp to ptr
+  %isl = call i1 @llvm.nvvm.isspacep.global(ptr nonnull %lp_asc)
+  %isl8 = zext i1 %isl to i8
+  store i8 %isl8, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+
+  ret void
+}
+
+define dso_local void @check_shared(ptr nocapture noundef readnone %out, ptr nocapture noundef readnone %genp,
+; CHECK-LABEL: define dso_local void @check_shared(
+; CHECK-SAME: ptr nocapture noundef readnone [[OUT:%.*]], ptr nocapture noundef readnone [[GENP:%.*]], ptr addrspace(1) [[GP:%.*]], ptr addrspace(3) [[SP:%.*]], ptr addrspace(4) [[CP:%.*]], ptr addrspace(5) [[LP:%.*]]) local_unnamed_addr {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[GEN0:%.*]] = tail call i1 @llvm.nvvm.isspacep.shared(ptr [[GENP]])
+; CHECK-NEXT:    [[STOREDV:%.*]] = zext i1 [[GEN0]] to i8
+; CHECK-NEXT:    store i8 [[STOREDV]], ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+; CHECK-NEXT:    store i8 1, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+; CHECK-NEXT:    store i8 1, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+; CHECK-NEXT:    ret void
+;
+  ptr addrspace(1) %gp,
+  ptr addrspace(3) %sp,
+  ptr addrspace(4) %cp,
+  ptr addrspace(5) %lp) local_unnamed_addr {
+entry:
+  ; No constant folding for generic pointers of unknown origin.
+  %gen0 = tail call i1 @llvm.nvvm.isspacep.shared(ptr %genp)
+  %storedv = zext i1 %gen0 to i8
+  store i8 %storedv, ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+
+  %isg1 = tail call i1 @llvm.nvvm.isspacep.shared(ptr addrspacecast (ptr addrspace(1) @global_data to ptr))
+  %isg18 = zext i1 %isg1 to i8
+  store i8 %isg18, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+
+  %gp_asc = addrspacecast ptr addrspace(1) %gp to ptr
+  %isg2 = tail call i1 @llvm.nvvm.isspacep.shared(ptr %gp_asc)
+  %isg28 = zext i1 %isg2 to i8
+  store i8 %isg28, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+
+  %iss1 = tail call i1 @llvm.nvvm.isspacep.shared(ptr addrspacecast (ptr addrspace(3) @shared_data to ptr))
+  %iss18 = zext i1 %iss1 to i8
+  store i8 %iss18, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+
+  %sp_asc = addrspacecast ptr addrspace(3) %sp to ptr
+  %iss2 = tail call i1 @llvm.nvvm.isspacep.shared(ptr %sp_asc)
+  %iss28 = zext i1 %iss2 to i8
+  store i8 %iss28, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+
+  %isc1 = tail call i1 @llvm.nvvm.isspacep.shared(ptr addrspacecast (ptr addrspace(4) @const_data to ptr))
+  %isc18 = zext i1 %isc1 to i8
+  store i8 %isc18, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+
+  %cp_asc = addrspacecast ptr addrspace(4) %cp to ptr
+  %isc2 = tail call i1 @llvm.nvvm.isspacep.shared(ptr %cp_asc)
+  %isc28 = zext i1 %isc2 to i8
+  store i8 %isc28, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+
+  ; Local data can't have a constant address, so we can't have a constant ASC expression
+  ; We can only use an ASC instruction.
+  %lp_asc = addrspacecast ptr addrspace(5) %lp to ptr
+  %isl = call i1 @llvm.nvvm.isspacep.shared(ptr nonnull %lp_asc)
+  %isl8 = zext i1 %isl to i8
+  store i8 %isl8, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+
+  ret void
+}
+
+define dso_local void @check_const(ptr nocapture noundef readnone %out, ptr nocapture noundef readnone %genp,
+; CHECK-LABEL: define dso_local void @check_const(
+; CHECK-SAME: ptr nocapture noundef readnone [[OUT:%.*]], ptr nocapture noundef readnone [[GENP:%.*]], ptr addrspace(1) [[GP:%.*]], ptr addrspace(3) [[SP:%.*]], ptr addrspace(4) [[CP:%.*]], ptr addrspace(5) [[LP:%.*]]) local_unnamed_addr {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[GEN0:%.*]] = tail call i1 @llvm.nvvm.isspacep.const(ptr [[GENP]])
+; CHECK-NEXT:    [[STOREDV:%.*]] = zext i1 [[GEN0]] to i8
+; CHECK-NEXT:    store i8 [[STOREDV]], ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+; CHECK-NEXT:    store i8 1, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+; CHECK-NEXT:    store i8 1, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+; CHECK-NEXT:    ret void
+;
+  ptr addrspace(1) %gp,
+  ptr addrspace(3) %sp,
+  ptr addrspace(4) %cp,
+  ptr addrspace(5) %lp) local_unnamed_addr {
+entry:
+  ; No constant folding for generic pointers of unknown origin.
+  %gen0 = tail call i1 @llvm.nvvm.isspacep.const(ptr %genp)
+  %storedv = zext i1 %gen0 to i8
+  store i8 %storedv, ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+
+  %isg1 = tail call i1 @llvm.nvvm.isspacep.const(ptr addrspacecast (ptr addrspace(1) @global_data to ptr))
+  %isg18 = zext i1 %isg1 to i8
+  store i8 %isg18, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+
+  %gp_asc = addrspacecast ptr addrspace(1) %gp to ptr
+  %isg2 = tail call i1 @llvm.nvvm.isspacep.const(ptr %gp_asc)
+  %isg28 = zext i1 %isg2 to i8
+  store i8 %isg28, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+
+  %iss1 = tail call i1 @llvm.nvvm.isspacep.const(ptr addrspacecast (ptr addrspace(3) @shared_data to ptr))
+  %iss18 = zext i1 %iss1 to i8
+  store i8 %iss18, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+
+  %sp_asc = addrspacecast ptr addrspace(3) %sp to ptr
+  %iss2 = tail call i1 @llvm.nvvm.isspacep.const(ptr %sp_asc)
+  %iss28 = zext i1 %iss2 to i8
+  store i8 %iss28, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+
+  %isc1 = tail call i1 @llvm.nvvm.isspacep.const(ptr addrspacecast (ptr addrspace(4) @const_data to ptr))
+  %isc18 = zext i1 %isc1 to i8
+  store i8 %isc18, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+
+  %cp_asc = addrspacecast ptr addrspace(4) %cp to ptr
+  %isc2 = tail call i1 @llvm.nvvm.isspacep.const(ptr %cp_asc)
+  %isc28 = zext i1 %isc2 to i8
+  store i8 %isc28, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+
+  ; Local data can't have a constant address, so we can't have a constant ASC expression
+  ; We can only use an ASC instruction.
+  %lp_asc = addrspacecast ptr addrspace(5) %lp to ptr
+  %isl = call i1 @llvm.nvvm.isspacep.const(ptr nonnull %lp_asc)
+  %isl8 = zext i1 %isl to i8
+  store i8 %isl8, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+
+  ret void
+}
+
+define dso_local void @check_local(ptr nocapture noundef readnone %out, ptr nocapture noundef readnone %genp,
+; CHECK-LABEL: define dso_local void @check_local(
+; CHECK-SAME: ptr nocapture noundef readnone [[OUT:%.*]], ptr nocapture noundef readnone [[GENP:%.*]], ptr addrspace(1) [[GP:%.*]], ptr addrspace(3) [[SP:%.*]], ptr addrspace(4) [[CP:%.*]], ptr addrspace(5) [[LP:%.*]]) local_unnamed_addr {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[GEN0:%.*]] = tail call i1 @llvm.nvvm.isspacep.local(ptr [[GENP]])
+; CHECK-NEXT:    [[STOREDV:%.*]] = zext i1 [[GEN0]] to i8
+; CHECK-NEXT:    store i8 [[STOREDV]], ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+; CHECK-NEXT:    store i8 0, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+; CHECK-NEXT:    store i8 1, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+; CHECK-NEXT:    ret void
+;
+  ptr addrspace(1) %gp,
+  ptr addrspace(3) %sp,
+  ptr addrspace(4) %cp,
+  ptr addrspace(5) %lp) local_unnamed_addr {
+entry:
+  ; No constant folding for generic pointers of unknown origin.
+  %gen0 = tail call i1 @llvm.nvvm.isspacep.local(ptr %genp)
+  %storedv = zext i1 %gen0 to i8
+  store i8 %storedv, ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+
+  %isg1 = tail call i1 @llvm.nvvm.isspacep.local(ptr addrspacecast (ptr addrspace(1) @global_data to ptr))
+  %isg18 = zext i1 %isg1 to i8
+  store i8 %isg18, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+
+  %gp_asc = addrspacecast ptr addrspace(1) %gp to ptr
+  %isg2 = tail call i1 @llvm.nvvm.isspacep.local(ptr %gp_asc)
+  %isg28 = zext i1 %isg2 to i8
+  store i8 %isg28, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+
+  %iss1 = tail call i1 @llvm.nvvm.isspacep.local(ptr addrspacecast (ptr addrspace(3) @shared_data to ptr))
+  %iss18 = zext i1 %iss1 to i8
+  store i8 %iss18, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+
+  %sp_asc = addrspacecast ptr addrspace(3) %sp to ptr
+  %iss2 = tail call i1 @llvm.nvvm.isspacep.local(ptr %sp_asc)
+  %iss28 = zext i1 %iss2 to i8
+  store i8 %iss28, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+
+  %isc1 = tail call i1 @llvm.nvvm.isspacep.local(ptr addrspacecast (ptr addrspace(4) @const_data to ptr))
+  %isc18 = zext i1 %isc1 to i8
+  store i8 %isc18, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+
+  %cp_asc = addrspacecast ptr addrspace(4) %cp to ptr
+  %isc2 = tail call i1 @llvm.nvvm.isspacep.local(ptr %cp_asc)
+  %isc28 = zext i1 %isc2 to i8
+  store i8 %isc28, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+
+  ; Local data can't have a constant address, so we can't have a constant ASC expression
+  ; We can only use an ASC instruction.
+  %lp_asc = addrspacecast ptr addrspace(5) %lp to ptr
+  %isl = call i1 @llvm.nvvm.isspacep.local(ptr nonnull %lp_asc)
+  %isl8 = zext i1 %isl to i8
+  store i8 %isl8, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+
+  ret void
+}
+

