[llvm] [NVPTX] instcombine known pointer AS checks. (PR #112964)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 18 12:49:54 PDT 2024
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-llvm-support
Author: Artem Belevich (Artem-B)
<details>
<summary>Changes</summary>
This avoids crashing on impossible address space casts guarded by `__isGlobal/__isShared`.
Partially fixes https://github.com/llvm/llvm-project/issues/112760
It's still possible to trigger the issue by using explicit AS casts w/o AS checks, but LLVM should no longer crash on valid code.
---
Full diff: https://github.com/llvm/llvm-project/pull/112964.diff
4 Files Affected:
- (added) llvm/include/llvm/Support/NVPTXAddrSpace.h (+33)
- (modified) llvm/lib/Analysis/InstructionSimplify.cpp (+30)
- (modified) llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h (+2-10)
- (added) llvm/test/Transforms/InstCombine/NVPTX/isspacep.ll (+261)
``````````diff
diff --git a/llvm/include/llvm/Support/NVPTXAddrSpace.h b/llvm/include/llvm/Support/NVPTXAddrSpace.h
new file mode 100644
index 00000000000000..063d2aaffdc57d
--- /dev/null
+++ b/llvm/include/llvm/Support/NVPTXAddrSpace.h
@@ -0,0 +1,33 @@
+//===---------------- NVPTXAddrSpace.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// NVPTX address space definition
+///
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_NVPTXADDRSPACE_H
+#define LLVM_SUPPORT_NVPTXADDRSPACE_H
+
+namespace llvm {
+namespace NVPTXAS {
+enum AddressSpace : unsigned {
+ ADDRESS_SPACE_GENERIC = 0,
+ ADDRESS_SPACE_GLOBAL = 1,
+ ADDRESS_SPACE_SHARED = 3,
+ ADDRESS_SPACE_CONST = 4,
+ ADDRESS_SPACE_LOCAL = 5,
+
+ ADDRESS_SPACE_PARAM = 101,
+};
+} // end namespace NVPTXAS
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_NVPTXADDRSPACE_H
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index d08be1e55c853e..b525bc27d72b8b 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -38,10 +38,12 @@
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/KnownBits.h"
+#include "llvm/Support/NVPTXAddrSpace.h"
#include <algorithm>
#include <optional>
using namespace llvm;
@@ -6365,6 +6367,34 @@ static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
break;
}
+ case Intrinsic::nvvm_isspacep_global:
+ case Intrinsic::nvvm_isspacep_local:
+ case Intrinsic::nvvm_isspacep_shared:
+ case Intrinsic::nvvm_isspacep_const: {
+ auto *Ty = F->getReturnType();
+ unsigned AS = Op0->getType()->getPointerAddressSpace();
+ if (AS == NVPTXAS::ADDRESS_SPACE_GENERIC) {
+ if (auto *ASC = dyn_cast<AddrSpaceCastInst>(Op0))
+ AS = ASC->getSrcAddressSpace();
+ else if (auto *CE = dyn_cast<ConstantExpr>(Op0)) {
+ if (CE->getOpcode() == Instruction::AddrSpaceCast)
+ AS = CE->getOperand(0)->getType()->getPointerAddressSpace();
+ }
+ }
+ if (AS == NVPTXAS::ADDRESS_SPACE_GENERIC ||
+ AS == NVPTXAS::ADDRESS_SPACE_PARAM)
+ return nullptr; // Got to check at run-time.
+ bool ASMatches = (AS == NVPTXAS::ADDRESS_SPACE_GLOBAL &&
+ IID == Intrinsic::nvvm_isspacep_global) ||
+ (AS == NVPTXAS::ADDRESS_SPACE_LOCAL &&
+ IID == Intrinsic::nvvm_isspacep_local) ||
+ (AS == NVPTXAS::ADDRESS_SPACE_SHARED &&
+ IID == Intrinsic::nvvm_isspacep_shared) ||
+ (AS == NVPTXAS::ADDRESS_SPACE_CONST &&
+ IID == Intrinsic::nvvm_isspacep_const);
+ return ConstantInt::get(Ty, ASMatches);
+ }
default:
break;
}
diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
index 815b600fe93a9f..d06e2c00ec3f96 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
@@ -16,18 +16,10 @@
#ifndef LLVM_LIB_TARGET_NVPTX_MCTARGETDESC_NVPTXBASEINFO_H
#define LLVM_LIB_TARGET_NVPTX_MCTARGETDESC_NVPTXBASEINFO_H
+#include "llvm/Support/NVPTXAddrSpace.h"
namespace llvm {
-enum AddressSpace {
- ADDRESS_SPACE_GENERIC = 0,
- ADDRESS_SPACE_GLOBAL = 1,
- ADDRESS_SPACE_SHARED = 3,
- ADDRESS_SPACE_CONST = 4,
- ADDRESS_SPACE_LOCAL = 5,
-
- // NVVM Internal
- ADDRESS_SPACE_PARAM = 101
-};
+using namespace NVPTXAS;
namespace NVPTXII {
enum {
diff --git a/llvm/test/Transforms/InstCombine/NVPTX/isspacep.ll b/llvm/test/Transforms/InstCombine/NVPTX/isspacep.ll
new file mode 100644
index 00000000000000..f53ec0120cfb3e
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/NVPTX/isspacep.ll
@@ -0,0 +1,261 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=instcombine -mtriple=nvptx64-nvidia-cuda -S | FileCheck %s
+target datalayout = "e-i64:64-i128:128-v16:16-v32:32-n16:32:64"
+target triple = "nvptx64-nvidia-cuda"
+
+; Source data in different AS.
+ at shared_data = dso_local addrspace(3) global i32 undef, align 4
+ at global_data = dso_local addrspace(1) externally_initialized global i32 0, align 4
+ at const_data = dso_local addrspace(4) externally_initialized constant i32 3, align 4
+
+; Results get stored here.
+ at gen = dso_local addrspace(1) externally_initialized global i8 0, align 1
+ at g1 = dso_local addrspace(1) externally_initialized global i8 0, align 1
+ at g2 = dso_local addrspace(1) externally_initialized global i8 0, align 1
+ at s1 = dso_local addrspace(1) externally_initialized global i8 0, align 1
+ at s2 = dso_local addrspace(1) externally_initialized global i8 0, align 1
+ at c1 = dso_local addrspace(1) externally_initialized global i8 0, align 1
+ at c2 = dso_local addrspace(1) externally_initialized global i8 0, align 1
+ at l = dso_local addrspace(1) externally_initialized global i8 0, align 1
+
+declare i1 @llvm.nvvm.isspacep.global(ptr nocapture)
+declare i1 @llvm.nvvm.isspacep.shared(ptr nocapture)
+declare i1 @llvm.nvvm.isspacep.const(ptr nocapture)
+declare i1 @llvm.nvvm.isspacep.local(ptr nocapture)
+
+define dso_local void @check_global(ptr nocapture noundef readnone %out, ptr nocapture noundef readnone %generic_data, ptr addrspace(5) %local_data) local_unnamed_addr {
+; CHECK-LABEL: define dso_local void @check_global(
+; CHECK-SAME: ptr nocapture noundef readnone [[OUT:%.*]], ptr nocapture noundef readnone [[GENERIC_DATA:%.*]], ptr addrspace(5) [[LOCAL_DATA:%.*]]) local_unnamed_addr {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[GEN0:%.*]] = tail call i1 @llvm.nvvm.isspacep.global(ptr [[GENERIC_DATA]])
+; CHECK-NEXT: [[STOREDV:%.*]] = zext i1 [[GEN0]] to i8
+; CHECK-NEXT: store i8 [[STOREDV]], ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+; CHECK-NEXT: store i8 1, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+; CHECK-NEXT: store i8 1, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+; CHECK-NEXT: ret void
+;
+entry:
+ ; No constant folding for generic pointers of unknown origin.
+ %gen0 = tail call i1 @llvm.nvvm.isspacep.global(ptr %generic_data)
+ %storedv = zext i1 %gen0 to i8
+ store i8 %storedv, ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+
+ %isg1 = tail call i1 @llvm.nvvm.isspacep.global(ptr addrspacecast (ptr addrspace(1) @global_data to ptr))
+ %isg18 = zext i1 %isg1 to i8
+ store i8 %isg18, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+
+ %global_data_asc = addrspacecast ptr addrspace(1) @global_data to ptr
+ %isg2 = tail call i1 @llvm.nvvm.isspacep.global(ptr %global_data_asc)
+ %isg28 = zext i1 %isg2 to i8
+ store i8 %isg28, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+
+ %iss1 = tail call i1 @llvm.nvvm.isspacep.global(ptr addrspacecast (ptr addrspace(3) @shared_data to ptr))
+ %iss18 = zext i1 %iss1 to i8
+ store i8 %iss18, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+
+ %shared_data_asc = addrspacecast ptr addrspace(3) @shared_data to ptr
+ %iss2 = tail call i1 @llvm.nvvm.isspacep.global(ptr %shared_data_asc)
+ %iss28 = zext i1 %iss2 to i8
+ store i8 %iss28, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+
+ %isc1 = tail call i1 @llvm.nvvm.isspacep.global(ptr addrspacecast (ptr addrspace(4) @const_data to ptr))
+ %isc18 = zext i1 %isc1 to i8
+ store i8 %isc18, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+
+ %const_data_asc = addrspacecast ptr addrspace(4) @const_data to ptr
+ %isc2 = tail call i1 @llvm.nvvm.isspacep.global(ptr %const_data_asc)
+ %isc28 = zext i1 %isc2 to i8
+ store i8 %isc28, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+
+ ; Local data can't have a constant address, so we can't have a constant ASC expression
+ ; We can only use an ASC instruction.
+ %local_data_asc = addrspacecast ptr addrspace(5) %local_data to ptr
+ %isl = call i1 @llvm.nvvm.isspacep.global(ptr nonnull %local_data_asc)
+ %isl8 = zext i1 %isl to i8
+ store i8 %isl8, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+
+ ret void
+}
+
+define dso_local void @check_shared(ptr nocapture noundef readnone %out, ptr nocapture noundef readnone %generic_data, ptr addrspace(5) %local_data) local_unnamed_addr {
+; CHECK-LABEL: define dso_local void @check_shared(
+; CHECK-SAME: ptr nocapture noundef readnone [[OUT:%.*]], ptr nocapture noundef readnone [[GENERIC_DATA:%.*]], ptr addrspace(5) [[LOCAL_DATA:%.*]]) local_unnamed_addr {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[GEN0:%.*]] = tail call i1 @llvm.nvvm.isspacep.shared(ptr [[GENERIC_DATA]])
+; CHECK-NEXT: [[STOREDV:%.*]] = zext i1 [[GEN0]] to i8
+; CHECK-NEXT: store i8 [[STOREDV]], ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+; CHECK-NEXT: store i8 1, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+; CHECK-NEXT: store i8 1, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+; CHECK-NEXT: ret void
+;
+entry:
+ ; No constant folding for generic pointers of unknown origin.
+ %gen0 = tail call i1 @llvm.nvvm.isspacep.shared(ptr %generic_data)
+ %storedv = zext i1 %gen0 to i8
+ store i8 %storedv, ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+
+ %isg1 = tail call i1 @llvm.nvvm.isspacep.shared(ptr addrspacecast (ptr addrspace(1) @global_data to ptr))
+ %isg18 = zext i1 %isg1 to i8
+ store i8 %isg18, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+
+ %global_data_asc = addrspacecast ptr addrspace(1) @global_data to ptr
+ %isg2 = tail call i1 @llvm.nvvm.isspacep.shared(ptr %global_data_asc)
+ %isg28 = zext i1 %isg2 to i8
+ store i8 %isg28, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+
+ %iss1 = tail call i1 @llvm.nvvm.isspacep.shared(ptr addrspacecast (ptr addrspace(3) @shared_data to ptr))
+ %iss18 = zext i1 %iss1 to i8
+ store i8 %iss18, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+
+ %shared_data_asc = addrspacecast ptr addrspace(3) @shared_data to ptr
+ %iss2 = tail call i1 @llvm.nvvm.isspacep.shared(ptr %shared_data_asc)
+ %iss28 = zext i1 %iss2 to i8
+ store i8 %iss28, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+
+ %isc1 = tail call i1 @llvm.nvvm.isspacep.shared(ptr addrspacecast (ptr addrspace(4) @const_data to ptr))
+ %isc18 = zext i1 %isc1 to i8
+ store i8 %isc18, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+
+ %const_data_asc = addrspacecast ptr addrspace(4) @const_data to ptr
+ %isc2 = tail call i1 @llvm.nvvm.isspacep.shared(ptr %const_data_asc)
+ %isc28 = zext i1 %isc2 to i8
+ store i8 %isc28, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+
+ ; Local data can't have a constant address, so we can't have a constant ASC expression
+ ; We can only use an ASC instruction.
+ %local_data_asc = addrspacecast ptr addrspace(5) %local_data to ptr
+ %isl = call i1 @llvm.nvvm.isspacep.shared(ptr nonnull %local_data_asc)
+ %isl8 = zext i1 %isl to i8
+ store i8 %isl8, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+
+ ret void
+}
+
+define dso_local void @check_const(ptr nocapture noundef readnone %out, ptr nocapture noundef readnone %generic_data, ptr addrspace(5) %local_data) local_unnamed_addr {
+; CHECK-LABEL: define dso_local void @check_const(
+; CHECK-SAME: ptr nocapture noundef readnone [[OUT:%.*]], ptr nocapture noundef readnone [[GENERIC_DATA:%.*]], ptr addrspace(5) [[LOCAL_DATA:%.*]]) local_unnamed_addr {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[GEN0:%.*]] = tail call i1 @llvm.nvvm.isspacep.const(ptr [[GENERIC_DATA]])
+; CHECK-NEXT: [[STOREDV:%.*]] = zext i1 [[GEN0]] to i8
+; CHECK-NEXT: store i8 [[STOREDV]], ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+; CHECK-NEXT: store i8 1, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+; CHECK-NEXT: store i8 1, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+; CHECK-NEXT: ret void
+;
+entry:
+ ; No constant folding for generic pointers of unknown origin.
+ %gen0 = tail call i1 @llvm.nvvm.isspacep.const(ptr %generic_data)
+ %storedv = zext i1 %gen0 to i8
+ store i8 %storedv, ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+
+ %isg1 = tail call i1 @llvm.nvvm.isspacep.const(ptr addrspacecast (ptr addrspace(1) @global_data to ptr))
+ %isg18 = zext i1 %isg1 to i8
+ store i8 %isg18, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+
+ %global_data_asc = addrspacecast ptr addrspace(1) @global_data to ptr
+ %isg2 = tail call i1 @llvm.nvvm.isspacep.const(ptr %global_data_asc)
+ %isg28 = zext i1 %isg2 to i8
+ store i8 %isg28, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+
+ %iss1 = tail call i1 @llvm.nvvm.isspacep.const(ptr addrspacecast (ptr addrspace(3) @shared_data to ptr))
+ %iss18 = zext i1 %iss1 to i8
+ store i8 %iss18, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+
+ %shared_data_asc = addrspacecast ptr addrspace(3) @shared_data to ptr
+ %iss2 = tail call i1 @llvm.nvvm.isspacep.const(ptr %shared_data_asc)
+ %iss28 = zext i1 %iss2 to i8
+ store i8 %iss28, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+
+ %isc1 = tail call i1 @llvm.nvvm.isspacep.const(ptr addrspacecast (ptr addrspace(4) @const_data to ptr))
+ %isc18 = zext i1 %isc1 to i8
+ store i8 %isc18, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+
+ %const_data_asc = addrspacecast ptr addrspace(4) @const_data to ptr
+ %isc2 = tail call i1 @llvm.nvvm.isspacep.const(ptr %const_data_asc)
+ %isc28 = zext i1 %isc2 to i8
+ store i8 %isc28, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+
+ ; Local data can't have a constant address, so we can't have a constant ASC expression
+ ; We can only use an ASC instruction.
+ %local_data_asc = addrspacecast ptr addrspace(5) %local_data to ptr
+ %isl = call i1 @llvm.nvvm.isspacep.const(ptr nonnull %local_data_asc)
+ %isl8 = zext i1 %isl to i8
+ store i8 %isl8, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+
+ ret void
+}
+
+define dso_local void @check_local(ptr nocapture noundef readnone %out, ptr nocapture noundef readnone %generic_data, ptr addrspace(5) %local_data) local_unnamed_addr {
+; CHECK-LABEL: define dso_local void @check_local(
+; CHECK-SAME: ptr nocapture noundef readnone [[OUT:%.*]], ptr nocapture noundef readnone [[GENERIC_DATA:%.*]], ptr addrspace(5) [[LOCAL_DATA:%.*]]) local_unnamed_addr {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[GEN0:%.*]] = tail call i1 @llvm.nvvm.isspacep.local(ptr [[GENERIC_DATA]])
+; CHECK-NEXT: [[STOREDV:%.*]] = zext i1 [[GEN0]] to i8
+; CHECK-NEXT: store i8 [[STOREDV]], ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+; CHECK-NEXT: store i8 0, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+; CHECK-NEXT: store i8 1, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+; CHECK-NEXT: ret void
+;
+entry:
+ ; No constant folding for generic pointers of unknown origin.
+ %gen0 = tail call i1 @llvm.nvvm.isspacep.local(ptr %generic_data)
+ %storedv = zext i1 %gen0 to i8
+ store i8 %storedv, ptr addrspacecast (ptr addrspace(1) @gen to ptr), align 1
+
+ %isg1 = tail call i1 @llvm.nvvm.isspacep.local(ptr addrspacecast (ptr addrspace(1) @global_data to ptr))
+ %isg18 = zext i1 %isg1 to i8
+ store i8 %isg18, ptr addrspacecast (ptr addrspace(1) @g1 to ptr), align 1
+
+ %global_data_asc = addrspacecast ptr addrspace(1) @global_data to ptr
+ %isg2 = tail call i1 @llvm.nvvm.isspacep.local(ptr %global_data_asc)
+ %isg28 = zext i1 %isg2 to i8
+ store i8 %isg28, ptr addrspacecast (ptr addrspace(1) @g2 to ptr), align 1
+
+ %iss1 = tail call i1 @llvm.nvvm.isspacep.local(ptr addrspacecast (ptr addrspace(3) @shared_data to ptr))
+ %iss18 = zext i1 %iss1 to i8
+ store i8 %iss18, ptr addrspacecast (ptr addrspace(1) @s1 to ptr), align 1
+
+ %shared_data_asc = addrspacecast ptr addrspace(3) @shared_data to ptr
+ %iss2 = tail call i1 @llvm.nvvm.isspacep.local(ptr %shared_data_asc)
+ %iss28 = zext i1 %iss2 to i8
+ store i8 %iss28, ptr addrspacecast (ptr addrspace(1) @s2 to ptr), align 1
+
+ %isc1 = tail call i1 @llvm.nvvm.isspacep.local(ptr addrspacecast (ptr addrspace(4) @const_data to ptr))
+ %isc18 = zext i1 %isc1 to i8
+ store i8 %isc18, ptr addrspacecast (ptr addrspace(1) @c1 to ptr), align 1
+
+ %const_data_asc = addrspacecast ptr addrspace(4) @const_data to ptr
+ %isc2 = tail call i1 @llvm.nvvm.isspacep.local(ptr %const_data_asc)
+ %isc28 = zext i1 %isc2 to i8
+ store i8 %isc28, ptr addrspacecast (ptr addrspace(1) @c2 to ptr), align 1
+
+ ; Local data can't have a constant address, so we can't have a constant ASC expression
+ ; We can only use an ASC instruction.
+ %local_data_asc = addrspacecast ptr addrspace(5) %local_data to ptr
+ %isl = call i1 @llvm.nvvm.isspacep.local(ptr nonnull %local_data_asc)
+ %isl8 = zext i1 %isl to i8
+ store i8 %isl8, ptr addrspacecast (ptr addrspace(1) @l to ptr), align 1
+
+ ret void
+}
+
``````````
</details>
https://github.com/llvm/llvm-project/pull/112964
More information about the llvm-commits
mailing list