[llvm-branch-commits] [clang] [clang-tools-extra] [compiler-rt] [flang] [lldb] [llvm] [mlir] [flang] Lower omp.workshare to other omp constructs (PR #101446)
Ivan R. Ivanov via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Nov 19 00:01:43 PST 2024
https://github.com/ivanradanov updated https://github.com/llvm/llvm-project/pull/101446
From 31a4d2c2eb265708b2ff50f6f9c53685a3df8d10 Mon Sep 17 00:00:00 2001
From: vporpo <vporpodas at google.com>
Date: Mon, 18 Nov 2024 20:23:10 -0800
Subject: [PATCH 01/22] [SandboxVec][DAG] Cleanup: Move callback registration
from Scheduler to DAG (#116455)
This is a refactoring patch that moves the callback registration for
getting notified about new instructions from the scheduler to the DAG.
This makes sense from a design and testing point of view:
- the DAG should not rely on the scheduler for getting notified
- the notifiers don't need to be public
- it's easier to test the notifiers directly from within the DAG unit
tests
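
The new ownership is easy to see in isolation. Below is a minimal,
standalone C++ sketch of the register-in-constructor /
unregister-in-destructor idiom this patch adopts; Context, CallbackID, and
the registration methods are simplified stand-ins for illustration, not the
actual sandboxir API.

  #include <cstdint>
  #include <functional>
  #include <map>
  #include <optional>

  struct Instruction {};

  // Simplified stand-in for the IR context that owns the callbacks.
  class Context {
  public:
    using CallbackID = uint64_t;
    CallbackID
    registerCreateInstrCallback(std::function<void(Instruction *)> CB) {
      Callbacks[NextID] = std::move(CB);
      return NextID++;
    }
    void unregisterCreateInstrCallback(CallbackID ID) { Callbacks.erase(ID); }
    // Called by the IR layer whenever a new instruction is created.
    void notifyAll(Instruction *I) {
      for (auto &[ID, CB] : Callbacks)
        CB(I);
    }

  private:
    CallbackID NextID = 0;
    std::map<CallbackID, std::function<void(Instruction *)>> Callbacks;
  };

  // The DAG owns its registration: it subscribes in its constructor and
  // unsubscribes in its destructor, so no other component (such as the
  // scheduler) needs to forward notifications to it.
  class DependencyGraph {
  public:
    DependencyGraph(Context &Ctx) : Ctx(&Ctx) {
      CreateInstrCB = Ctx.registerCreateInstrCallback(
          [this](Instruction *I) { notifyCreateInstr(I); });
    }
    ~DependencyGraph() {
      if (CreateInstrCB)
        Ctx->unregisterCreateInstrCallback(*CreateInstrCB);
    }

  private:
    void notifyCreateInstr(Instruction *I) { /* getOrCreateNode(I); */ }
    Context *Ctx = nullptr;
    std::optional<Context::CallbackID> CreateInstrCB;
  };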
---
.../SandboxVectorizer/DependencyGraph.h | 27 ++++++--
.../Vectorize/SandboxVectorizer/Scheduler.h | 9 +--
.../SandboxVectorizer/DependencyGraphTest.cpp | 66 ++++++++++++++-----
.../SandboxVectorizer/SchedulerTest.cpp | 2 +-
4 files changed, 72 insertions(+), 32 deletions(-)
diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.h
index 5211c7922ea2fd..765b65c4971bed 100644
--- a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.h
+++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/DependencyGraph.h
@@ -290,6 +290,9 @@ class DependencyGraph {
/// The DAG spans across all instructions in this interval.
Interval<Instruction> DAGInterval;
+ Context *Ctx = nullptr;
+ std::optional<Context::CallbackID> CreateInstrCB;
+
std::unique_ptr<BatchAAResults> BatchAA;
enum class DependencyType {
@@ -325,9 +328,24 @@ class DependencyGraph {
/// chain.
void createNewNodes(const Interval<Instruction> &NewInterval);
+ /// Called by the callbacks when a new instruction \p I has been created.
+ void notifyCreateInstr(Instruction *I) {
+ getOrCreateNode(I);
+ // TODO: Update the dependencies for the new node.
+ // TODO: Update the MemDGNode chain to include the new node if needed.
+ }
+
public:
- DependencyGraph(AAResults &AA)
- : BatchAA(std::make_unique<BatchAAResults>(AA)) {}
+ /// This constructor also registers callbacks.
+ DependencyGraph(AAResults &AA, Context &Ctx)
+ : Ctx(&Ctx), BatchAA(std::make_unique<BatchAAResults>(AA)) {
+ CreateInstrCB = Ctx.registerCreateInstrCallback(
+ [this](Instruction *I) { notifyCreateInstr(I); });
+ }
+ ~DependencyGraph() {
+ if (CreateInstrCB)
+ Ctx->unregisterCreateInstrCallback(*CreateInstrCB);
+ }
DGNode *getNode(Instruction *I) const {
auto It = InstrToNodeMap.find(I);
@@ -354,11 +372,6 @@ class DependencyGraph {
Interval<Instruction> extend(ArrayRef<Instruction *> Instrs);
/// \Returns the range of instructions included in the DAG.
Interval<Instruction> getInterval() const { return DAGInterval; }
- /// Called by the scheduler when a new instruction \p I has been created.
- void notifyCreateInstr(Instruction *I) {
- getOrCreateNode(I);
- // TODO: Update the dependencies for the new node.
- }
void clear() {
InstrToNodeMap.clear();
DAGInterval = {};
diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Scheduler.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Scheduler.h
index 9c11b5dbc16432..022fd71df67dc6 100644
--- a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Scheduler.h
+++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Scheduler.h
@@ -106,8 +106,6 @@ class Scheduler {
std::optional<BasicBlock::iterator> ScheduleTopItOpt;
// TODO: This is wasting memory in exchange for fast removal using a raw ptr.
DenseMap<SchedBundle *, std::unique_ptr<SchedBundle>> Bndls;
- Context &Ctx;
- Context::CallbackID CreateInstrCB;
/// \Returns a scheduling bundle containing \p Instrs.
SchedBundle *createBundle(ArrayRef<Instruction *> Instrs);
@@ -137,11 +135,8 @@ class Scheduler {
Scheduler &operator=(const Scheduler &) = delete;
public:
- Scheduler(AAResults &AA, Context &Ctx) : DAG(AA), Ctx(Ctx) {
- CreateInstrCB = Ctx.registerCreateInstrCallback(
- [this](Instruction *I) { DAG.notifyCreateInstr(I); });
- }
- ~Scheduler() { Ctx.unregisterCreateInstrCallback(CreateInstrCB); }
+ Scheduler(AAResults &AA, Context &Ctx) : DAG(AA, Ctx) {}
+ ~Scheduler() {}
bool trySchedule(ArrayRef<Instruction *> Instrs);
diff --git a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/DependencyGraphTest.cpp b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/DependencyGraphTest.cpp
index 061d57c31ce236..206f6c5b4c1359 100644
--- a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/DependencyGraphTest.cpp
+++ b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/DependencyGraphTest.cpp
@@ -194,7 +194,7 @@ define void @foo(i8 %v1, ptr %ptr) {
auto *Call = cast<sandboxir::CallInst>(&*It++);
auto *Ret = cast<sandboxir::ReturnInst>(&*It++);
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()});
EXPECT_TRUE(isa<llvm::sandboxir::MemDGNode>(DAG.getNode(Store)));
EXPECT_TRUE(isa<llvm::sandboxir::MemDGNode>(DAG.getNode(Load)));
@@ -224,7 +224,7 @@ define void @foo(ptr %ptr, i8 %v0, i8 %v1) {
auto *S0 = cast<sandboxir::StoreInst>(&*It++);
auto *S1 = cast<sandboxir::StoreInst>(&*It++);
auto *Ret = cast<sandboxir::ReturnInst>(&*It++);
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
auto Span = DAG.extend({&*BB->begin(), BB->getTerminator()});
// Check extend().
EXPECT_EQ(Span.top(), &*BB->begin());
@@ -285,7 +285,7 @@ define i8 @foo(i8 %v0, i8 %v1) {
auto *F = Ctx.createFunction(LLVMF);
auto *BB = &*F->begin();
auto It = BB->begin();
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()});
auto *AddN0 = DAG.getNode(cast<sandboxir::BinaryOperator>(&*It++));
@@ -332,7 +332,7 @@ define void @foo(ptr %ptr, i8 %v0, i8 %v1) {
auto *S1 = cast<sandboxir::StoreInst>(&*It++);
[[maybe_unused]] auto *Ret = cast<sandboxir::ReturnInst>(&*It++);
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()});
auto *S0N = cast<sandboxir::MemDGNode>(DAG.getNode(S0));
@@ -366,7 +366,7 @@ define void @foo(ptr %ptr, i8 %v0, i8 %v1) {
auto *S1 = cast<sandboxir::StoreInst>(&*It++);
auto *Ret = cast<sandboxir::ReturnInst>(&*It++);
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()});
auto *S0N = cast<sandboxir::MemDGNode>(DAG.getNode(S0));
@@ -436,7 +436,7 @@ define void @foo(ptr %ptr, i8 %v0, i8 %v1) {
sandboxir::Context Ctx(C);
auto *F = Ctx.createFunction(LLVMF);
auto *BB = &*F->begin();
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()});
auto It = BB->begin();
auto *Store0N = cast<sandboxir::MemDGNode>(
@@ -461,7 +461,7 @@ define void @foo(ptr noalias %ptr0, ptr noalias %ptr1, i8 %v0, i8 %v1) {
sandboxir::Context Ctx(C);
auto *F = Ctx.createFunction(LLVMF);
auto *BB = &*F->begin();
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()});
auto It = BB->begin();
auto *Store0N = cast<sandboxir::MemDGNode>(
@@ -487,7 +487,7 @@ define void @foo(ptr noalias %ptr0, ptr noalias %ptr1) {
sandboxir::Context Ctx(C);
auto *F = Ctx.createFunction(LLVMF);
auto *BB = &*F->begin();
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()});
auto It = BB->begin();
auto *Ld0N = cast<sandboxir::MemDGNode>(
@@ -512,7 +512,7 @@ define void @foo(ptr noalias %ptr0, ptr noalias %ptr1, i8 %v) {
sandboxir::Context Ctx(C);
auto *F = Ctx.createFunction(LLVMF);
auto *BB = &*F->begin();
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()});
auto It = BB->begin();
auto *Store0N = cast<sandboxir::MemDGNode>(
@@ -542,7 +542,7 @@ define void @foo(float %v1, float %v2) {
auto *F = Ctx.createFunction(LLVMF);
auto *BB = &*F->begin();
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()->getPrevNode()});
auto It = BB->begin();
@@ -574,7 +574,7 @@ define void @foo() {
auto *F = Ctx.createFunction(LLVMF);
auto *BB = &*F->begin();
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()->getPrevNode()});
auto It = BB->begin();
@@ -606,7 +606,7 @@ define void @foo(i8 %v0, i8 %v1, ptr %ptr) {
auto *F = Ctx.createFunction(LLVMF);
auto *BB = &*F->begin();
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()->getPrevNode()});
auto It = BB->begin();
@@ -637,7 +637,7 @@ define void @foo(ptr %ptr) {
auto *F = Ctx.createFunction(LLVMF);
auto *BB = &*F->begin();
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()->getPrevNode()});
auto It = BB->begin();
@@ -664,7 +664,7 @@ define void @foo(ptr %ptr) {
auto *F = Ctx.createFunction(LLVMF);
auto *BB = &*F->begin();
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()->getPrevNode()});
auto It = BB->begin();
@@ -695,7 +695,7 @@ define void @foo() {
auto *F = Ctx.createFunction(LLVMF);
auto *BB = &*F->begin();
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()->getPrevNode()});
auto It = BB->begin();
@@ -728,7 +728,7 @@ define void @foo(ptr %ptr, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
auto *S3 = cast<sandboxir::StoreInst>(&*It++);
auto *S4 = cast<sandboxir::StoreInst>(&*It++);
auto *S5 = cast<sandboxir::StoreInst>(&*It++);
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
{
// Scenario 1: Build new DAG
auto NewIntvl = DAG.extend({S3, S3});
@@ -788,7 +788,7 @@ define void @foo(ptr %ptr, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
{
// Check UnscheduledSuccs when a node is scheduled
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({S2, S2});
auto *S2N = cast<sandboxir::MemDGNode>(DAG.getNode(S2));
S2N->setScheduled(true);
@@ -798,3 +798,35 @@ define void @foo(ptr %ptr, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
EXPECT_EQ(S1N->getNumUnscheduledSuccs(), 0u); // S1 is scheduled
}
}
+
+TEST_F(DependencyGraphTest, CreateInstrCallback) {
+ parseIR(C, R"IR(
+define void @foo(ptr %ptr, i8 %v1, i8 %v2, i8 %v3, i8 %arg) {
+ store i8 %v1, ptr %ptr
+ store i8 %v2, ptr %ptr
+ store i8 %v3, ptr %ptr
+ ret void
+}
+)IR");
+ llvm::Function *LLVMF = &*M->getFunction("foo");
+ sandboxir::Context Ctx(C);
+ auto *F = Ctx.createFunction(LLVMF);
+ auto *BB = &*F->begin();
+ auto It = BB->begin();
+ auto *S1 = cast<sandboxir::StoreInst>(&*It++);
+ [[maybe_unused]] auto *S2 = cast<sandboxir::StoreInst>(&*It++);
+ auto *S3 = cast<sandboxir::StoreInst>(&*It++);
+
+ // Check new instruction callback.
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
+ DAG.extend({S1, S3});
+ auto *Arg = F->getArg(3);
+ auto *Ptr = S1->getPointerOperand();
+ sandboxir::StoreInst *NewS =
+ sandboxir::StoreInst::create(Arg, Ptr, Align(8), S3->getIterator(),
+ /*IsVolatile=*/true, Ctx);
+ auto *NewSN = DAG.getNode(NewS);
+ EXPECT_TRUE(NewSN != nullptr);
+ // TODO: Check the dependencies to/from NewSN after they land.
+ // TODO: Check the MemDGNode chain.
+}
diff --git a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/SchedulerTest.cpp b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/SchedulerTest.cpp
index 94a57914429748..c5e44a97976a72 100644
--- a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/SchedulerTest.cpp
+++ b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/SchedulerTest.cpp
@@ -70,7 +70,7 @@ define void @foo(ptr %ptr, i8 %v0, i8 %v1) {
auto *S1 = cast<sandboxir::StoreInst>(&*It++);
auto *Ret = cast<sandboxir::ReturnInst>(&*It++);
- sandboxir::DependencyGraph DAG(getAA(*LLVMF));
+ sandboxir::DependencyGraph DAG(getAA(*LLVMF), Ctx);
DAG.extend({&*BB->begin(), BB->getTerminator()});
auto *SN0 = DAG.getNode(S0);
auto *SN1 = DAG.getNode(S1);
From 1eaa17975dc568cff4fe31a79c0d147ef5c55301 Mon Sep 17 00:00:00 2001
From: Madhur Amilkanthwar <madhura at nvidia.com>
Date: Tue, 19 Nov 2024 09:58:20 +0530
Subject: [PATCH 02/22] [LoopInterchange] Bail out early if minimum loop nest
depth is not met (#115128)
This patch bails out early if the minimum loop nest depth
is not met. As it stands today, the pass computes
CacheCost before it attempts the transform, a computation
that is unnecessary when the minimum depth is not met.
This handles basic cases where the depth is typically 1.
By avoiding the unnecessary computation, the patch aims
to improve compile time.
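
Schematically, the patch hoists a cheap O(1) depth check ahead of the
expensive analyses. The C++ sketch below mirrors that control flow under
simplified placeholder types; runLoopInterchange, Loop, and CacheCost here
stand in for the pass's real interfaces and are illustrative only.

  #include <vector>

  struct Loop {};
  struct CacheCost {}; // placeholder for the expensive-to-compute analysis

  static bool hasMinimumLoopDepth(const std::vector<Loop *> &LoopList) {
    // Interchange needs at least two nested loops.
    return LoopList.size() >= 2;
  }

  bool runLoopInterchange(const std::vector<Loop *> &LoopList) {
    // Cheap check first: bail before paying for CacheCost/DependenceInfo.
    if (!hasMinimumLoopDepth(LoopList))
      return false; // PreservedAnalyses::all() in the real pass
    CacheCost CC;   // expensive analysis, now computed only when needed
    // ... legality and profitability checks, then the transform ...
    return true;
  }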
---
.../lib/Transforms/Scalar/LoopInterchange.cpp | 20 ++++--
.../LoopInterchange/bail-out-one-loop.ll | 65 +++++++++++++++++++
2 files changed, 81 insertions(+), 4 deletions(-)
create mode 100644 llvm/test/Transforms/LoopInterchange/bail-out-one-loop.ll
diff --git a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
index db63bda1e6b926..a8c3d61030bb78 100644
--- a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -234,6 +234,14 @@ static void populateWorklist(Loop &L, LoopVector &LoopList) {
LoopList.push_back(CurrentLoop);
}
+static bool hasMinimumLoopDepth(SmallVectorImpl<Loop *> &LoopList) {
+ unsigned LoopNestDepth = LoopList.size();
+ if (LoopNestDepth < 2) {
+ LLVM_DEBUG(dbgs() << "Loop doesn't contain minimum nesting level.\n");
+ return false;
+ }
+ return true;
+}
namespace {
/// LoopInterchangeLegality checks if it is legal to interchange the loop.
@@ -416,11 +424,11 @@ struct LoopInterchange {
bool processLoopList(SmallVectorImpl<Loop *> &LoopList) {
bool Changed = false;
+
+ // Ensure minimum loop nest depth.
+ assert(hasMinimumLoopDepth(LoopList) && "Loop nest does not meet minimum depth.");
+
unsigned LoopNestDepth = LoopList.size();
- if (LoopNestDepth < 2) {
- LLVM_DEBUG(dbgs() << "Loop doesn't contain minimum nesting level.\n");
- return false;
- }
if (LoopNestDepth > MaxLoopNestDepth) {
LLVM_DEBUG(dbgs() << "Cannot handle loops of depth greater than "
<< MaxLoopNestDepth << "\n");
@@ -1712,6 +1720,10 @@ PreservedAnalyses LoopInterchangePass::run(LoopNest &LN,
LoopStandardAnalysisResults &AR,
LPMUpdater &U) {
Function &F = *LN.getParent();
+ SmallVector<Loop *, 8> LoopList(LN.getLoops());
+ // Ensure minimum depth of the loop nest to do the interchange.
+ if (!hasMinimumLoopDepth(LoopList))
+ return PreservedAnalyses::all();
DependenceInfo DI(&F, &AR.AA, &AR.SE, &AR.LI);
std::unique_ptr<CacheCost> CC =
diff --git a/llvm/test/Transforms/LoopInterchange/bail-out-one-loop.ll b/llvm/test/Transforms/LoopInterchange/bail-out-one-loop.ll
new file mode 100644
index 00000000000000..788e1b0157d80f
--- /dev/null
+++ b/llvm/test/Transforms/LoopInterchange/bail-out-one-loop.ll
@@ -0,0 +1,65 @@
+; REQUIRES: asserts
+
+; RUN: opt < %s -passes=loop-interchange -debug -disable-output 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
+
+ at N = dso_local global i32 0, align 4
+ at a = dso_local global ptr null, align 8
+ at b = dso_local global ptr null, align 8
+ at c = dso_local global ptr null, align 8
+
+; Loop interchange should not run delinearization
+; for the single-loop case and should bail out early.
+
+; CHECK-NOT: Delinearizing
+; CHECK-NOT: Strides:
+; CHECK-NOT: Terms:
+; CHECK: Loop doesn't contain minimum nesting level.
+
+define void @foo() {
+entry:
+ %retval = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 0, ptr %retval, align 4
+ store i32 0, ptr %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32, ptr %i, align 4
+ %1 = load i32, ptr @N, align 4
+ %cmp = icmp ult i32 %0, %1
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond
+ br label %for.end
+
+for.body: ; preds = %for.cond
+ %2 = load ptr, ptr @b, align 8
+ %3 = load i32, ptr %i, align 4
+ %idxprom = zext i32 %3 to i64
+ %arrayidx = getelementptr inbounds nuw i32, ptr %2, i64 %idxprom
+ %4 = load i32, ptr %arrayidx, align 4
+ %5 = load ptr, ptr @c, align 8
+ %6 = load i32, ptr %i, align 4
+ %idxprom1 = zext i32 %6 to i64
+ %arrayidx2 = getelementptr inbounds nuw i32, ptr %5, i64 %idxprom1
+ %7 = load i32, ptr %arrayidx2, align 4
+ %add = add nsw i32 %4, %7
+ %8 = load ptr, ptr @a, align 8
+ %9 = load i32, ptr %i, align 4
+ %idxprom3 = zext i32 %9 to i64
+ %arrayidx4 = getelementptr inbounds nuw i32, ptr %8, i64 %idxprom3
+ store i32 %add, ptr %arrayidx4, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %10 = load i32, ptr %i, align 4
+ %inc = add i32 %10, 1
+ store i32 %inc, ptr %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond.cleanup
+ ret void
+}
+
From 08ef9396377cd60239a6312640f4fdc9c91a1703 Mon Sep 17 00:00:00 2001
From: Maksim Panchenko <maks at fb.com>
Date: Mon, 18 Nov 2024 20:42:38 -0800
Subject: [PATCH 03/22] [BOLT] Overwrite .eh_frame_hdr in-place (#116730)
If the new EH frame header can fit into the original .eh_frame_hdr
section, overwrite it in-place and pad with zeroes.
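
The core of the change is a fit-or-relocate decision. The following
self-contained C++ sketch shows that logic on a plain byte buffer; Section,
writeHeader, and the buffer handling are simplified stand-ins for
RewriteInstance's actual output machinery, not BOLT's API.

  #include <algorithm>
  #include <cstddef>
  #include <cstdint>
  #include <vector>

  struct Section {
    uint64_t FileOffset = 0;
    size_t Size = 0;
  };

  // Reuse the old section when the new header fits, otherwise append it in
  // newly reserved space; when overwriting in place, zero-pad the leftover
  // bytes (the patch does this via write_zeros()).
  void writeHeader(std::vector<char> &Out, size_t &NextOffset,
                   const Section *OldSec, const std::vector<char> &NewHdr) {
    bool InPlace = OldSec && NewHdr.size() <= OldSec->Size;
    size_t Offset = InPlace ? OldSec->FileOffset : NextOffset;
    if (!InPlace)
      NextOffset += NewHdr.size();
    size_t End = Offset + (InPlace ? OldSec->Size : NewHdr.size());
    if (Out.size() < End)
      Out.resize(End, 0);
    std::copy(NewHdr.begin(), NewHdr.end(), Out.begin() + Offset);
    if (InPlace)
      std::fill(Out.begin() + Offset + NewHdr.size(), Out.begin() + End, 0);
  }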
---
bolt/lib/Rewrite/RewriteInstance.cpp | 80 ++++++++++++++++++----------
bolt/test/eh-frame-hdr.test | 12 +++++
2 files changed, 63 insertions(+), 29 deletions(-)
create mode 100644 bolt/test/eh-frame-hdr.test
diff --git a/bolt/lib/Rewrite/RewriteInstance.cpp b/bolt/lib/Rewrite/RewriteInstance.cpp
index 1fcf2bb959bbbb..40769944e3876b 100644
--- a/bolt/lib/Rewrite/RewriteInstance.cpp
+++ b/bolt/lib/Rewrite/RewriteInstance.cpp
@@ -5791,42 +5791,64 @@ void RewriteInstance::writeEHFrameHeader() {
LLVM_DEBUG(dbgs() << "BOLT: writing a new " << getEHFrameHdrSectionName()
<< '\n');
- NextAvailableAddress =
- appendPadding(Out->os(), NextAvailableAddress, EHFrameHdrAlign);
+ // Try to overwrite the original .eh_frame_hdr if the size permits.
+ uint64_t EHFrameHdrOutputAddress = 0;
+ uint64_t EHFrameHdrFileOffset = 0;
+ std::vector<char> NewEHFrameHdr;
+ BinarySection *OldEHFrameHdrSection = getSection(getEHFrameHdrSectionName());
+ if (OldEHFrameHdrSection) {
+ NewEHFrameHdr = CFIRdWrt->generateEHFrameHeader(
+ RelocatedEHFrame, NewEHFrame, OldEHFrameHdrSection->getAddress());
+ if (NewEHFrameHdr.size() <= OldEHFrameHdrSection->getSize()) {
+ BC->outs() << "BOLT-INFO: rewriting " << getEHFrameHdrSectionName()
+ << " in-place\n";
+ EHFrameHdrOutputAddress = OldEHFrameHdrSection->getAddress();
+ EHFrameHdrFileOffset = OldEHFrameHdrSection->getInputFileOffset();
+ } else {
+ OldEHFrameHdrSection->setOutputName(getOrgSecPrefix() +
+ getEHFrameHdrSectionName());
+ OldEHFrameHdrSection = nullptr;
+ }
+ }
- const uint64_t EHFrameHdrOutputAddress = NextAvailableAddress;
- const uint64_t EHFrameHdrFileOffset =
- getFileOffsetForAddress(NextAvailableAddress);
+ // If there was not enough space, allocate more memory for .eh_frame_hdr.
+ if (!OldEHFrameHdrSection) {
+ NextAvailableAddress =
+ appendPadding(Out->os(), NextAvailableAddress, EHFrameHdrAlign);
- std::vector<char> NewEHFrameHdr = CFIRdWrt->generateEHFrameHeader(
- RelocatedEHFrame, NewEHFrame, EHFrameHdrOutputAddress);
+ EHFrameHdrOutputAddress = NextAvailableAddress;
+ EHFrameHdrFileOffset = getFileOffsetForAddress(NextAvailableAddress);
+
+ NewEHFrameHdr = CFIRdWrt->generateEHFrameHeader(
+ RelocatedEHFrame, NewEHFrame, EHFrameHdrOutputAddress);
+
+ NextAvailableAddress += NewEHFrameHdr.size();
+ if (!BC->BOLTReserved.empty() &&
+ (NextAvailableAddress > BC->BOLTReserved.end())) {
+ BC->errs() << "BOLT-ERROR: unable to fit " << getEHFrameHdrSectionName()
+ << " into reserved space\n";
+ exit(1);
+ }
+
+ // Create a new entry in the section header table.
+ const unsigned Flags = BinarySection::getFlags(/*IsReadOnly=*/true,
+ /*IsText=*/false,
+ /*IsAllocatable=*/true);
+ BinarySection &EHFrameHdrSec = BC->registerOrUpdateSection(
+ getNewSecPrefix() + getEHFrameHdrSectionName(), ELF::SHT_PROGBITS,
+ Flags, nullptr, NewEHFrameHdr.size(), /*Alignment=*/1);
+ EHFrameHdrSec.setOutputFileOffset(EHFrameHdrFileOffset);
+ EHFrameHdrSec.setOutputAddress(EHFrameHdrOutputAddress);
+ EHFrameHdrSec.setOutputName(getEHFrameHdrSectionName());
+ }
Out->os().seek(EHFrameHdrFileOffset);
Out->os().write(NewEHFrameHdr.data(), NewEHFrameHdr.size());
- const unsigned Flags = BinarySection::getFlags(/*IsReadOnly=*/true,
- /*IsText=*/false,
- /*IsAllocatable=*/true);
- BinarySection *OldEHFrameHdrSection = getSection(getEHFrameHdrSectionName());
+ // Pad the contents if overwriting in-place.
if (OldEHFrameHdrSection)
- OldEHFrameHdrSection->setOutputName(getOrgSecPrefix() +
- getEHFrameHdrSectionName());
-
- BinarySection &EHFrameHdrSec = BC->registerOrUpdateSection(
- getNewSecPrefix() + getEHFrameHdrSectionName(), ELF::SHT_PROGBITS, Flags,
- nullptr, NewEHFrameHdr.size(), /*Alignment=*/1);
- EHFrameHdrSec.setOutputFileOffset(EHFrameHdrFileOffset);
- EHFrameHdrSec.setOutputAddress(EHFrameHdrOutputAddress);
- EHFrameHdrSec.setOutputName(getEHFrameHdrSectionName());
-
- NextAvailableAddress += EHFrameHdrSec.getOutputSize();
-
- if (!BC->BOLTReserved.empty() &&
- (NextAvailableAddress > BC->BOLTReserved.end())) {
- BC->errs() << "BOLT-ERROR: unable to fit " << getEHFrameHdrSectionName()
- << " into reserved space\n";
- exit(1);
- }
+ Out->os().write_zeros(OldEHFrameHdrSection->getSize() -
+ NewEHFrameHdr.size());
// Merge new .eh_frame with the relocated original so that gdb can locate all
// FDEs.
diff --git a/bolt/test/eh-frame-hdr.test b/bolt/test/eh-frame-hdr.test
new file mode 100644
index 00000000000000..4d718c850e2f28
--- /dev/null
+++ b/bolt/test/eh-frame-hdr.test
@@ -0,0 +1,12 @@
+# Check that llvm-bolt overwrites .eh_frame_hdr in-place.
+
+REQUIRES: system-linux
+
+RUN: %clang %cflags %p/Inputs/hello.c -o %t -Wl,-q
+RUN: llvm-bolt %t -o %t.bolt --use-old-text \
+RUN: | FileCheck %s --check-prefix=CHECK-BOLT
+RUN: llvm-readelf -WS %t.bolt | FileCheck %s
+
+CHECK-BOLT: rewriting .eh_frame_hdr in-place
+
+CHECK-NOT: .bolt.org.eh_frame_hdr
From a17f11baa1b622359547604555173384e220eef3 Mon Sep 17 00:00:00 2001
From: Wael Yehia <wyehia at ca.ibm.com>
Date: Mon, 18 Nov 2024 23:01:07 -0500
Subject: [PATCH 04/22] [test][PGO] Add a multi-threaded test for continuous
PGO.
---
.../ContinuousSyncMode/multi-threaded.cpp | 29 +++++++++++++++++++
compiler-rt/test/profile/lit.cfg.py | 8 +++++
2 files changed, 37 insertions(+)
create mode 100644 compiler-rt/test/profile/ContinuousSyncMode/multi-threaded.cpp
diff --git a/compiler-rt/test/profile/ContinuousSyncMode/multi-threaded.cpp b/compiler-rt/test/profile/ContinuousSyncMode/multi-threaded.cpp
new file mode 100644
index 00000000000000..ff05a69a5e7d4d
--- /dev/null
+++ b/compiler-rt/test/profile/ContinuousSyncMode/multi-threaded.cpp
@@ -0,0 +1,29 @@
+// REQUIRES: target={{.*(darwin|aix).*}}
+
+// RUN: rm -f %t.profraw
+// RUN: %clangxx_pgogen_cont -lpthread %s -o %t.exe -mllvm -disable-vp -fprofile-update=atomic
+// RUN: env LLVM_PROFILE_FILE="%c%t.profraw" %run %t.exe
+// RUN: llvm-profdata show --counts --function=accum %t.profraw | FileCheck %s
+// CHECK: Block counts: [100000, 4]
+
+#include <thread>
+
+int x = 0;
+void accum(int n) {
+ for (int i = 0; i < n; i++)
+ x += i; // don't care about accuracy, no need for atomic.
+}
+
+int main() {
+ int init_value = 10000;
+ auto t1 = std::thread(accum, 1*init_value);
+ auto t2 = std::thread(accum, 2*init_value);
+ auto t3 = std::thread(accum, 3*init_value);
+ auto t4 = std::thread(accum, 4*init_value);
+
+ t1.join();
+ t2.join();
+ t3.join();
+ t4.join();
+ return !x;
+}
diff --git a/compiler-rt/test/profile/lit.cfg.py b/compiler-rt/test/profile/lit.cfg.py
index 7a8877b9f4e50e..72a389eaf0dfb2 100644
--- a/compiler-rt/test/profile/lit.cfg.py
+++ b/compiler-rt/test/profile/lit.cfg.py
@@ -138,6 +138,14 @@ def exclude_unsupported_files_for_aix(dirname):
config.substitutions.append(
("%clangxx_pgogen=", build_invocation(clang_cxxflags) + " -fprofile-generate=")
)
+config.substitutions.append(
+ (
+ "%clangxx_pgogen_cont ",
+ build_invocation(clang_cxxflags)
+ + " -fprofile-generate "
+ + ("-mllvm -runtime-counter-relocation " if runtime_reloc else ""),
+ )
+)
config.substitutions.append(
("%clang_cspgogen ", build_invocation(clang_cflags) + " -fcs-profile-generate ")
From ff9509e7d8ffac11ec25cea6c0dd7783097d3181 Mon Sep 17 00:00:00 2001
From: Vasileios Porpodas <vporpodas at google.com>
Date: Mon, 18 Nov 2024 20:33:46 -0800
Subject: [PATCH 05/22] [SandboxVec][BottomUpVec][NFC] Add some comments
---
.../Vectorize/SandboxVectorizer/Passes/BottomUpVec.h | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.h
index 6109db71611018..bd45634814b076 100644
--- a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.h
+++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.h
@@ -30,12 +30,17 @@ class BottomUpVec final : public FunctionPass {
/// Creates and returns a vector instruction that replaces the instructions in
/// \p Bndl. \p Operands are the already vectorized operands.
Value *createVectorInstr(ArrayRef<Value *> Bndl, ArrayRef<Value *> Operands);
+ /// Erases all dead instructions from the dead instruction candidates
+ /// collected during vectorization.
void tryEraseDeadInstrs();
+ /// Packs all elements of \p ToPack into a vector and returns that vector.
Value *createPack(ArrayRef<Value *> ToPack);
+ /// Recursively try to vectorize \p Bndl and its operands.
Value *vectorizeRec(ArrayRef<Value *> Bndl, unsigned Depth);
+ /// Entry point for vectorization starting from \p Seeds.
bool tryVectorize(ArrayRef<Value *> Seeds);
- // The PM containing the pipeline of region passes.
+ /// The PM containing the pipeline of region passes.
RegionPassManager RPM;
public:
From 661c593850715881d2805a59e90e6d87d8b9fbb8 Mon Sep 17 00:00:00 2001
From: Haopeng Liu <153236845+haopliu at users.noreply.github.com>
Date: Mon, 18 Nov 2024 21:36:05 -0800
Subject: [PATCH 06/22] [FunctionAttrs] Add the "initializes" attribute
inference (#97373)
Add the "initializes" attribute inference.
This change is expected to introduce a ~0.09% compile-time regression,
which seems an acceptable cost for enabling interprocedural DSE.
https://llvm-compile-time-tracker.com/compare.php?from=9f10252c4ad7cffbbcf692fa9c953698f82ac4f5&to=56345c1cee4375eb5c28b8e7abf4803d20216b3b&stat=instructions%3Au
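
The inference operates on lists of initialized byte ranges: within a block,
write ranges are unioned (and read ranges subtracted), while across control
flow the range lists of successors are intersected. As a self-contained
illustration, the sketch below implements just the union step on plain
[Lower, Upper) pairs instead of LLVM's ConstantRangeList; unionRanges is a
hypothetical helper, not the pass's API.

  #include <algorithm>
  #include <cstdint>
  #include <utility>
  #include <vector>

  using Range = std::pair<int64_t, int64_t>; // [Lower, Upper) byte offsets

  // Union two range lists, merging adjacent and overlapping ranges; this is
  // the core operation when two writes through the same argument combine.
  std::vector<Range> unionRanges(std::vector<Range> A,
                                 const std::vector<Range> &B) {
    A.insert(A.end(), B.begin(), B.end());
    std::sort(A.begin(), A.end());
    std::vector<Range> Out;
    for (Range R : A) {
      if (!Out.empty() && R.first <= Out.back().second)
        Out.back().second = std::max(Out.back().second, R.second);
      else
        Out.push_back(R);
    }
    return Out;
  }

  // E.g. an i32 store at offset 0 and an i32 store at offset 4 combine into
  // a single initializes((0, 8)) range, as in the updated CHECK lines below:
  //   unionRanges({{0, 4}}, {{4, 8}}) == {{0, 8}}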
---
llvm/lib/Transforms/IPO/FunctionAttrs.cpp | 329 +++++++++-
.../TypeBasedAliasAnalysis/functionattrs.ll | 2 +-
.../amdgpu-libcall-sincos-pass-ordering.ll | 2 +-
.../AMDGPU/amdgpu-simplify-libcall-sincos.ll | 104 ++--
.../BPF/preserve-static-offset/store-zero.ll | 2 +-
llvm/test/Other/optimize-inrange-gep.ll | 2 +-
llvm/test/Transforms/Coroutines/coro-async.ll | 6 +-
.../Transforms/FunctionAttrs/argmemonly.ll | 10 +-
.../Transforms/FunctionAttrs/initializes.ll | 572 ++++++++++++++++++
.../Transforms/FunctionAttrs/readattrs.ll | 4 +-
.../Transforms/FunctionAttrs/writeonly.ll | 4 +-
.../PGOProfile/memprof_internal_linkage.ll | 2 +-
.../PhaseOrdering/X86/unroll-vectorizer.ll | 2 +-
.../Transforms/PhaseOrdering/memcpy-offset.ll | 2 +-
llvm/test/Transforms/PhaseOrdering/pr95152.ll | 6 +-
15 files changed, 972 insertions(+), 77 deletions(-)
create mode 100644 llvm/test/Transforms/FunctionAttrs/initializes.ll
diff --git a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 79746201133bdd..afb0ea72b269c8 100644
--- a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -15,6 +15,7 @@
#include "llvm/Transforms/IPO/FunctionAttrs.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
@@ -36,6 +37,7 @@
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
+#include "llvm/IR/ConstantRangeList.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
@@ -581,6 +583,200 @@ struct ArgumentUsesTracker : public CaptureTracker {
const SCCNodeSet &SCCNodes;
};
+/// A struct describing an argument use: the Use and the offset it accesses.
+/// It tracks uses inside the function, including uses reached via GEPs. If a
+/// GEP has a non-constant index, the Offset field is nullopt.
+struct ArgumentUse {
+ Use *U;
+ std::optional<int64_t> Offset;
+};
+
+/// A struct of argument access info. "Unknown" accesses are cases such as
+/// unrecognized instructions, instructions that have more than one use of
+/// the argument, or volatile memory accesses. "WriteWithSideEffect" accesses
+/// are call instructions that not only write an argument but also capture it.
+struct ArgumentAccessInfo {
+ enum class AccessType : uint8_t { Write, WriteWithSideEffect, Read, Unknown };
+ AccessType ArgAccessType;
+ ConstantRangeList AccessRanges;
+};
+
+/// A struct to wrap the argument use info per block.
+struct UsesPerBlockInfo {
+ SmallDenseMap<Instruction *, ArgumentAccessInfo, 4> Insts;
+ bool HasWrites = false;
+ bool HasUnknownAccess = false;
+};
+
+/// A struct to summarize the argument use info in a function.
+struct ArgumentUsesSummary {
+ bool HasAnyWrite = false;
+ bool HasWriteOutsideEntryBB = false;
+ SmallDenseMap<const BasicBlock *, UsesPerBlockInfo, 16> UsesPerBlock;
+};
+
+ArgumentAccessInfo getArgumentAccessInfo(const Instruction *I,
+ const ArgumentUse &ArgUse,
+ const DataLayout &DL) {
+ auto GetTypeAccessRange =
+ [&DL](Type *Ty,
+ std::optional<int64_t> Offset) -> std::optional<ConstantRange> {
+ auto TypeSize = DL.getTypeStoreSize(Ty);
+ if (!TypeSize.isScalable() && Offset) {
+ int64_t Size = TypeSize.getFixedValue();
+ return ConstantRange(APInt(64, *Offset, true),
+ APInt(64, *Offset + Size, true));
+ }
+ return std::nullopt;
+ };
+ auto GetConstantIntRange =
+ [](Value *Length,
+ std::optional<int64_t> Offset) -> std::optional<ConstantRange> {
+ auto *ConstantLength = dyn_cast<ConstantInt>(Length);
+ if (ConstantLength && Offset)
+ return ConstantRange(
+ APInt(64, *Offset, true),
+ APInt(64, *Offset + ConstantLength->getSExtValue(), true));
+ return std::nullopt;
+ };
+ if (auto *SI = dyn_cast<StoreInst>(I)) {
+ if (SI->isSimple() && &SI->getOperandUse(1) == ArgUse.U) {
+ // Get the fixed type size of "SI". Since the access range of a write
+ // will be unioned, if "SI" doesn't have a fixed type size, we just set
+ // the access range to empty.
+ ConstantRangeList AccessRanges;
+ if (auto TypeAccessRange =
+ GetTypeAccessRange(SI->getAccessType(), ArgUse.Offset))
+ AccessRanges.insert(*TypeAccessRange);
+ return {ArgumentAccessInfo::AccessType::Write, std::move(AccessRanges)};
+ }
+ } else if (auto *LI = dyn_cast<LoadInst>(I)) {
+ if (LI->isSimple()) {
+ assert(&LI->getOperandUse(0) == ArgUse.U);
+ // Get the fixed type size of "LI". Unlike a Write, if "LI" doesn't
+ // have a fixed type size, we conservatively treat it as a clobber
+ // with an empty access range.
+ if (auto TypeAccessRange =
+ GetTypeAccessRange(LI->getAccessType(), ArgUse.Offset))
+ return {ArgumentAccessInfo::AccessType::Read, {*TypeAccessRange}};
+ }
+ } else if (auto *MemSet = dyn_cast<MemSetInst>(I)) {
+ if (!MemSet->isVolatile()) {
+ ConstantRangeList AccessRanges;
+ if (auto AccessRange =
+ GetConstantIntRange(MemSet->getLength(), ArgUse.Offset))
+ AccessRanges.insert(*AccessRange);
+ return {ArgumentAccessInfo::AccessType::Write, AccessRanges};
+ }
+ } else if (auto *MTI = dyn_cast<MemTransferInst>(I)) {
+ if (!MTI->isVolatile()) {
+ if (&MTI->getOperandUse(0) == ArgUse.U) {
+ ConstantRangeList AccessRanges;
+ if (auto AccessRange =
+ GetConstantIntRange(MTI->getLength(), ArgUse.Offset))
+ AccessRanges.insert(*AccessRange);
+ return {ArgumentAccessInfo::AccessType::Write, AccessRanges};
+ } else if (&MTI->getOperandUse(1) == ArgUse.U) {
+ if (auto AccessRange =
+ GetConstantIntRange(MTI->getLength(), ArgUse.Offset))
+ return {ArgumentAccessInfo::AccessType::Read, {*AccessRange}};
+ }
+ }
+ } else if (auto *CB = dyn_cast<CallBase>(I)) {
+ if (CB->isArgOperand(ArgUse.U)) {
+ unsigned ArgNo = CB->getArgOperandNo(ArgUse.U);
+ bool IsInitialize = CB->paramHasAttr(ArgNo, Attribute::Initializes);
+ // Argument is a Write when parameter is writeonly/readnone
+ // and nocapture. Otherwise, it's a WriteWithSideEffect.
+ auto Access = CB->onlyWritesMemory(ArgNo) &&
+ CB->paramHasAttr(ArgNo, Attribute::NoCapture)
+ ? ArgumentAccessInfo::AccessType::Write
+ : ArgumentAccessInfo::AccessType::WriteWithSideEffect;
+ ConstantRangeList AccessRanges;
+ if (IsInitialize && ArgUse.Offset) {
+ Attribute Attr = CB->getParamAttr(ArgNo, Attribute::Initializes);
+ ConstantRangeList CBCRL = Attr.getValueAsConstantRangeList();
+ for (ConstantRange &CR : CBCRL)
+ AccessRanges.insert(ConstantRange(CR.getLower() + *ArgUse.Offset,
+ CR.getUpper() + *ArgUse.Offset));
+ return {Access, AccessRanges};
+ }
+ }
+ }
+ // Other unrecognized instructions are considered as unknown.
+ return {ArgumentAccessInfo::AccessType::Unknown, {}};
+}
+
+// Collect the uses of argument "A" in "F".
+ArgumentUsesSummary collectArgumentUsesPerBlock(Argument &A, Function &F) {
+ auto &DL = F.getParent()->getDataLayout();
+ unsigned PointerSize =
+ DL.getIndexSizeInBits(A.getType()->getPointerAddressSpace());
+ ArgumentUsesSummary Result;
+
+ BasicBlock &EntryBB = F.getEntryBlock();
+ SmallVector<ArgumentUse, 4> Worklist;
+ for (Use &U : A.uses())
+ Worklist.push_back({&U, 0});
+
+ // Update "UsesPerBlock" with the block of "I" as key and "Info" as value.
+ // Return true if the block of "I" has write accesses after updating.
+ auto UpdateUseInfo = [&Result](Instruction *I, ArgumentAccessInfo Info) {
+ auto *BB = I->getParent();
+ auto &BBInfo = Result.UsesPerBlock[BB];
+ bool AlreadyVisitedInst = BBInfo.Insts.contains(I);
+ auto &IInfo = BBInfo.Insts[I];
+
+ // Instructions that have more than one use of the argument are considered
+ // as clobbers.
+ if (AlreadyVisitedInst) {
+ IInfo = {ArgumentAccessInfo::AccessType::Unknown, {}};
+ BBInfo.HasUnknownAccess = true;
+ return false;
+ }
+
+ IInfo = std::move(Info);
+ BBInfo.HasUnknownAccess |=
+ IInfo.ArgAccessType == ArgumentAccessInfo::AccessType::Unknown;
+ bool InfoHasWrites =
+ (IInfo.ArgAccessType == ArgumentAccessInfo::AccessType::Write ||
+ IInfo.ArgAccessType ==
+ ArgumentAccessInfo::AccessType::WriteWithSideEffect) &&
+ !IInfo.AccessRanges.empty();
+ BBInfo.HasWrites |= InfoHasWrites;
+ return InfoHasWrites;
+ };
+
+ // No need for a visited set because we don't look through phis, so there are
+ // no cycles.
+ while (!Worklist.empty()) {
+ ArgumentUse ArgUse = Worklist.pop_back_val();
+ User *U = ArgUse.U->getUser();
+ // Add GEP uses to worklist.
+ // If the GEP is not a constant GEP, set the ArgumentUse::Offset to nullopt.
+ if (auto *GEP = dyn_cast<GEPOperator>(U)) {
+ std::optional<int64_t> NewOffset = std::nullopt;
+ if (ArgUse.Offset) {
+ APInt Offset(PointerSize, 0);
+ if (GEP->accumulateConstantOffset(DL, Offset))
+ NewOffset = *ArgUse.Offset + Offset.getSExtValue();
+ }
+ for (Use &U : GEP->uses())
+ Worklist.push_back({&U, NewOffset});
+ continue;
+ }
+
+ auto *I = cast<Instruction>(U);
+ bool HasWrite = UpdateUseInfo(I, getArgumentAccessInfo(I, ArgUse, DL));
+
+ Result.HasAnyWrite |= HasWrite;
+
+ if (HasWrite && I->getParent() != &EntryBB)
+ Result.HasWriteOutsideEntryBB = true;
+ }
+ return Result;
+}
+
} // end anonymous namespace
namespace llvm {
@@ -867,9 +1063,129 @@ static bool addAccessAttr(Argument *A, Attribute::AttrKind R) {
return true;
}
+static bool inferInitializes(Argument &A, Function &F) {
+ auto ArgumentUses = collectArgumentUsesPerBlock(A, F);
+ // No write anywhere in the function, bail.
+ if (!ArgumentUses.HasAnyWrite)
+ return false;
+
+ auto &UsesPerBlock = ArgumentUses.UsesPerBlock;
+ BasicBlock &EntryBB = F.getEntryBlock();
+ // A map to store the argument ranges initialized by a BasicBlock (including
+ // its successors).
+ DenseMap<const BasicBlock *, ConstantRangeList> Initialized;
+ // Visit the successors of "BB" block and the instructions in BB (post-order)
+ // to get the argument ranges initialized by "BB" (including its successors).
+ // The result will be cached in "Initialized".
+ auto VisitBlock = [&](const BasicBlock *BB) -> ConstantRangeList {
+ auto UPB = UsesPerBlock.find(BB);
+ ConstantRangeList CRL;
+
+ // Start with intersection of successors.
+ // If this block has any clobbering use, we're going to clear out the
+ // ranges at some point in this block anyway, so don't bother looking at
+ // successors.
+ if (UPB == UsesPerBlock.end() || !UPB->second.HasUnknownAccess) {
+ bool HasAddedSuccessor = false;
+ for (auto *Succ : successors(BB)) {
+ if (auto SuccI = Initialized.find(Succ); SuccI != Initialized.end()) {
+ if (HasAddedSuccessor) {
+ CRL = CRL.intersectWith(SuccI->second);
+ } else {
+ CRL = SuccI->second;
+ HasAddedSuccessor = true;
+ }
+ } else {
+ CRL = ConstantRangeList();
+ break;
+ }
+ }
+ }
+
+ if (UPB != UsesPerBlock.end()) {
+ // Sort uses in this block by instruction order.
+ SmallVector<std::pair<Instruction *, ArgumentAccessInfo>, 2> Insts;
+ append_range(Insts, UPB->second.Insts);
+ sort(Insts, [](std::pair<Instruction *, ArgumentAccessInfo> &LHS,
+ std::pair<Instruction *, ArgumentAccessInfo> &RHS) {
+ return LHS.first->comesBefore(RHS.first);
+ });
+
+ // From the end of the block to the beginning of the block, set
+ // initializes ranges.
+ for (auto &[_, Info] : reverse(Insts)) {
+ if (Info.ArgAccessType == ArgumentAccessInfo::AccessType::Unknown ||
+ Info.ArgAccessType ==
+ ArgumentAccessInfo::AccessType::WriteWithSideEffect)
+ CRL = ConstantRangeList();
+ if (!Info.AccessRanges.empty()) {
+ if (Info.ArgAccessType == ArgumentAccessInfo::AccessType::Write ||
+ Info.ArgAccessType ==
+ ArgumentAccessInfo::AccessType::WriteWithSideEffect) {
+ CRL = CRL.unionWith(Info.AccessRanges);
+ } else {
+ assert(Info.ArgAccessType == ArgumentAccessInfo::AccessType::Read);
+ for (const auto &ReadRange : Info.AccessRanges)
+ CRL.subtract(ReadRange);
+ }
+ }
+ }
+ }
+ return CRL;
+ };
+
+ ConstantRangeList EntryCRL;
+ // If all write instructions are in the EntryBB, or if the EntryBB has
+ // a clobbering use, we only need to look at EntryBB.
+ bool OnlyScanEntryBlock = !ArgumentUses.HasWriteOutsideEntryBB;
+ if (!OnlyScanEntryBlock)
+ if (auto EntryUPB = UsesPerBlock.find(&EntryBB);
+ EntryUPB != UsesPerBlock.end())
+ OnlyScanEntryBlock = EntryUPB->second.HasUnknownAccess;
+ if (OnlyScanEntryBlock) {
+ EntryCRL = VisitBlock(&EntryBB);
+ if (EntryCRL.empty())
+ return false;
+ } else {
+ // Now we have to go through CFG to get the initialized argument ranges
+ // across blocks. With dominance and post-dominance, the initialized ranges
+ // by a block include both accesses inside this block and accesses in its
+ // (transitive) successors. So visit successors before predecessors with a
+ // post-order walk of the blocks and memorize the results in "Initialized".
+ for (const BasicBlock *BB : post_order(&F)) {
+ ConstantRangeList CRL = VisitBlock(BB);
+ if (!CRL.empty())
+ Initialized[BB] = CRL;
+ }
+
+ auto EntryCRLI = Initialized.find(&EntryBB);
+ if (EntryCRLI == Initialized.end())
+ return false;
+
+ EntryCRL = EntryCRLI->second;
+ }
+
+ assert(!EntryCRL.empty() &&
+ "should have bailed already if EntryCRL is empty");
+
+ if (A.hasAttribute(Attribute::Initializes)) {
+ ConstantRangeList PreviousCRL =
+ A.getAttribute(Attribute::Initializes).getValueAsConstantRangeList();
+ if (PreviousCRL == EntryCRL)
+ return false;
+ EntryCRL = EntryCRL.unionWith(PreviousCRL);
+ }
+
+ A.addAttr(Attribute::get(A.getContext(), Attribute::Initializes,
+ EntryCRL.rangesRef()));
+
+ return true;
+}
+
/// Deduce nocapture attributes for the SCC.
static void addArgumentAttrs(const SCCNodeSet &SCCNodes,
- SmallSet<Function *, 8> &Changed) {
+ SmallSet<Function *, 8> &Changed,
+ bool SkipInitializes) {
ArgumentGraph AG;
// Check each function in turn, determining which pointer arguments are not
@@ -937,6 +1253,10 @@ static void addArgumentAttrs(const SCCNodeSet &SCCNodes,
if (addAccessAttr(&A, R))
Changed.insert(F);
}
+ if (!SkipInitializes && !A.onlyReadsMemory()) {
+ if (inferInitializes(A, *F))
+ Changed.insert(F);
+ }
}
}
@@ -1910,13 +2230,16 @@ deriveAttrsInPostOrder(ArrayRef<Function *> Functions, AARGetterT &&AARGetter,
SmallSet<Function *, 8> Changed;
if (ArgAttrsOnly) {
- addArgumentAttrs(Nodes.SCCNodes, Changed);
+ // ArgAttrsOnly means to only infer attributes that may aid optimizations
+ // on the *current* function. "initializes" attribute is to aid
+ // optimizations (like DSE) on the callers, so skip "initializes" here.
+ addArgumentAttrs(Nodes.SCCNodes, Changed, /*SkipInitializes=*/true);
return Changed;
}
addArgumentReturnedAttrs(Nodes.SCCNodes, Changed);
addMemoryAttrs(Nodes.SCCNodes, AARGetter, Changed);
- addArgumentAttrs(Nodes.SCCNodes, Changed);
+ addArgumentAttrs(Nodes.SCCNodes, Changed, /*SkipInitializes=*/false);
inferConvergent(Nodes.SCCNodes, Changed);
addNoReturnAttrs(Nodes.SCCNodes, Changed);
addColdAttrs(Nodes.SCCNodes, Changed);
diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/functionattrs.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/functionattrs.ll
index bea56a72bdeaef..8615363a985d11 100644
--- a/llvm/test/Analysis/TypeBasedAliasAnalysis/functionattrs.ll
+++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/functionattrs.ll
@@ -15,7 +15,7 @@ define void @test0_yes(ptr %p) nounwind {
ret void
}
-; CHECK: define void @test0_no(ptr nocapture writeonly %p) #1 {
+; CHECK: define void @test0_no(ptr nocapture writeonly initializes((0, 4)) %p) #1 {
define void @test0_no(ptr %p) nounwind {
store i32 0, ptr %p, !tbaa !2
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-libcall-sincos-pass-ordering.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-libcall-sincos-pass-ordering.ll
index 6b835bb4eef662..317a069eed26e9 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-libcall-sincos-pass-ordering.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-libcall-sincos-pass-ordering.ll
@@ -10,7 +10,7 @@
; Should have call to sincos declarations, not calls to the asm pseudo-libcalls
define protected amdgpu_kernel void @swdev456865(ptr addrspace(1) %out0, ptr addrspace(1) %out1, ptr addrspace(1) %out2, float noundef %x) #0 {
; CHECK-LABEL: define protected amdgpu_kernel void @swdev456865(
-; CHECK-SAME: ptr addrspace(1) nocapture writeonly [[OUT0:%.*]], ptr addrspace(1) nocapture writeonly [[OUT1:%.*]], ptr addrspace(1) nocapture writeonly [[OUT2:%.*]], float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[OUT0:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[OUT1:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[OUT2:%.*]], float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[I_I:%.*]] = call float @_Z6sincosfPU3AS5f(float [[X]], ptr addrspace(5) [[__SINCOS_]]) #[[ATTR1:[0-9]+]]
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-sincos.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-sincos.ll
index 1358d91ae102c9..07587eaacd7034 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-sincos.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-sincos.ll
@@ -49,7 +49,7 @@ declare float @_Z6sincosfPU3AS0f(float %x, ptr writeonly %ptr) #1
define void @sincos_f16_nocontract(half %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f16_nocontract
-; CHECK-SAME: (half [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2:[0-9]+]] {
+; CHECK-SAME: (half [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 2)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 2)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call half @_Z3sinDh(half [[X]])
; CHECK-NEXT: store half [[CALL]], ptr addrspace(1) [[SIN_OUT]], align 2
@@ -68,7 +68,7 @@ entry:
define void @sincos_v2f16_nocontract(<2 x half> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v2f16_nocontract
-; CHECK-SAME: (<2 x half> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CHECK-SAME: (<2 x half> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x half> @_Z3sinDv2_Dh(<2 x half> [[X]])
; CHECK-NEXT: store <2 x half> [[CALL]], ptr addrspace(1) [[SIN_OUT]], align 4
@@ -87,7 +87,7 @@ entry:
define void @sincos_f16(half %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f16
-; CHECK-SAME: (half [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CHECK-SAME: (half [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 2)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 2)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call contract half @_Z3sinDh(half [[X]])
; CHECK-NEXT: store half [[CALL]], ptr addrspace(1) [[SIN_OUT]], align 2
@@ -105,7 +105,7 @@ entry:
define void @sincos_f16_order1(half %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f16_order1
-; CHECK-SAME: (half [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CHECK-SAME: (half [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 2)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 2)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL1:%.*]] = tail call contract half @_Z3cosDh(half [[X]])
; CHECK-NEXT: store half [[CALL1]], ptr addrspace(1) [[COS_OUT]], align 2
@@ -123,7 +123,7 @@ entry:
define void @sincos_v2f16(<2 x half> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v2f16
-; CHECK-SAME: (<2 x half> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CHECK-SAME: (<2 x half> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call contract <2 x half> @_Z3sinDv2_Dh(<2 x half> [[X]])
; CHECK-NEXT: store <2 x half> [[CALL]], ptr addrspace(1) [[SIN_OUT]], align 4
@@ -141,7 +141,7 @@ entry:
define void @sincos_v3f16(<3 x half> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v3f16
-; CHECK-SAME: (<3 x half> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CHECK-SAME: (<3 x half> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call contract <3 x half> @_Z3sinDv3_Dh(<3 x half> [[X]])
; CHECK-NEXT: [[EXTRACTVEC2:%.*]] = shufflevector <3 x half> [[CALL]], <3 x half> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
@@ -164,7 +164,7 @@ entry:
define void @sincos_v4f16(<4 x half> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v4f16
-; CHECK-SAME: (<4 x half> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CHECK-SAME: (<4 x half> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call contract <4 x half> @_Z3sinDv4_Dh(<4 x half> [[X]])
; CHECK-NEXT: store <4 x half> [[CALL]], ptr addrspace(1) [[SIN_OUT]], align 8
@@ -182,7 +182,7 @@ entry:
define void @sincos_v8f16(<8 x half> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v8f16
-; CHECK-SAME: (<8 x half> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CHECK-SAME: (<8 x half> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 16)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 16)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call contract <8 x half> @_Z3sinDv8_Dh(<8 x half> [[X]])
; CHECK-NEXT: store <8 x half> [[CALL]], ptr addrspace(1) [[SIN_OUT]], align 16
@@ -201,7 +201,7 @@ entry:
define void @sincos_v16f16(<16 x half> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v16f16
-; CHECK-SAME: (<16 x half> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CHECK-SAME: (<16 x half> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 32)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 32)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call contract <16 x half> @_Z3sinDv16_Dh(<16 x half> [[X]])
; CHECK-NEXT: store <16 x half> [[CALL]], ptr addrspace(1) [[SIN_OUT]], align 32
@@ -220,7 +220,7 @@ entry:
define void @sincos_f32_nocontract(float %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f32_nocontract
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3:[0-9]+]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call float @_Z6sincosfPU3AS5f(float [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -240,7 +240,7 @@ entry:
define void @sincos_v2f32_nocontract(<2 x float> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v2f32_nocontract
-; CHECK-SAME: (<2 x float> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4:[0-9]+]] {
+; CHECK-SAME: (<2 x float> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca <2 x float>, align 8, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call <2 x float> @_Z6sincosDv2_fPU3AS5S_(<2 x float> [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -260,7 +260,7 @@ entry:
define void @sincos_f32(float %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f32
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract float @_Z6sincosfPU3AS5f(float [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -279,7 +279,7 @@ entry:
define void @sincos_f32_order1(float %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f32_order1
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract float @_Z6sincosfPU3AS5f(float [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -298,7 +298,7 @@ entry:
define void @sincos_v2f32(<2 x float> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v2f32
-; CHECK-SAME: (<2 x float> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (<2 x float> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca <2 x float>, align 8, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract <2 x float> @_Z6sincosDv2_fPU3AS5S_(<2 x float> [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -317,7 +317,7 @@ entry:
define void @sincos_v3f32(<3 x float> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v3f32
-; CHECK-SAME: (<3 x float> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (<3 x float> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 16)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 16)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca <3 x float>, align 16, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract <3 x float> @_Z6sincosDv3_fPU3AS5S_(<3 x float> [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -340,7 +340,7 @@ entry:
define void @sincos_v4f32(<4 x float> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v4f32
-; CHECK-SAME: (<4 x float> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (<4 x float> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 16)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 16)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca <4 x float>, align 16, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract <4 x float> @_Z6sincosDv4_fPU3AS5S_(<4 x float> [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -359,7 +359,7 @@ entry:
define void @sincos_v8f32(<8 x float> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v8f32
-; CHECK-SAME: (<8 x float> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (<8 x float> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 32)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 32)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca <8 x float>, align 32, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract <8 x float> @_Z6sincosDv8_fPU3AS5S_(<8 x float> [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -378,7 +378,7 @@ entry:
define void @sincos_v16f32(<16 x float> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v16f32
-; CHECK-SAME: (<16 x float> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (<16 x float> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 64)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 64)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca <16 x float>, align 64, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract <16 x float> @_Z6sincosDv16_fPU3AS5S_(<16 x float> [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -397,7 +397,7 @@ entry:
define void @sincos_f64_nocontract(double %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f64_nocontract
-; CHECK-SAME: (double [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (double [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca double, align 8, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call double @_Z6sincosdPU3AS5d(double [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -417,7 +417,7 @@ entry:
define void @sincos_v2f64_nocontract(<2 x double> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v2f64_nocontract
-; CHECK-SAME: (<2 x double> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (<2 x double> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 16)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 16)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca <2 x double>, align 16, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @_Z6sincosDv2_dPU3AS5S_(<2 x double> [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -436,7 +436,7 @@ entry:
define void @sincos_f64(double %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f64
-; CHECK-SAME: (double [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (double [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca double, align 8, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract double @_Z6sincosdPU3AS5d(double [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -455,7 +455,7 @@ entry:
define void @sincos_f64_order1(double %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f64_order1
-; CHECK-SAME: (double [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (double [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca double, align 8, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract double @_Z6sincosdPU3AS5d(double [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -474,7 +474,7 @@ entry:
define void @sincos_v2f64(<2 x double> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v2f64
-; CHECK-SAME: (<2 x double> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (<2 x double> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 16)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 16)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca <2 x double>, align 16, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract <2 x double> @_Z6sincosDv2_dPU3AS5S_(<2 x double> [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -493,7 +493,7 @@ entry:
define void @sincos_v3f64(<3 x double> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v3f64
-; CHECK-SAME: (<3 x double> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (<3 x double> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 32)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 32)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca <3 x double>, align 32, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract <3 x double> @_Z6sincosDv3_dPU3AS5S_(<3 x double> [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -516,7 +516,7 @@ entry:
define void @sincos_v4f64(<4 x double> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v4f64
-; CHECK-SAME: (<4 x double> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (<4 x double> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 32)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 32)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca <4 x double>, align 32, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract <4 x double> @_Z6sincosDv4_dPU3AS5S_(<4 x double> [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -535,7 +535,7 @@ entry:
define void @sincos_v8f64(<8 x double> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v8f64
-; CHECK-SAME: (<8 x double> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (<8 x double> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 64)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 64)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca <8 x double>, align 64, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract <8 x double> @_Z6sincosDv8_dPU3AS5S_(<8 x double> [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -554,7 +554,7 @@ entry:
define void @sincos_v16f64(<16 x double> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v16f64
-; CHECK-SAME: (<16 x double> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (<16 x double> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 128)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 128)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca <16 x double>, align 128, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract <16 x double> @_Z6sincosDv16_dPU3AS5S_(<16 x double> [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -607,7 +607,7 @@ bb1:
; The sin and cos are in different blocks but always execute
define void @sincos_f32_different_blocks_dominating_always_execute(i1 %cond, float %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out, ptr addrspace(1) %other) {
; CHECK-LABEL: define void @sincos_f32_different_blocks_dominating_always_execute
-; CHECK-SAME: (i1 [[COND:%.*]], float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[OTHER:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (i1 [[COND:%.*]], float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[OTHER:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract float @_Z6sincosfPU3AS5f(float [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -639,7 +639,7 @@ bb1:
; sin dominates cos but cos doesn't always execute.
define void @sincos_f32_different_blocks_dominating_conditional_execute(i1 %cond, float %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out, ptr addrspace(1) %other) {
; CHECK-LABEL: define void @sincos_f32_different_blocks_dominating_conditional_execute
-; CHECK-SAME: (i1 [[COND:%.*]], float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]], ptr addrspace(1) nocapture readnone [[OTHER:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (i1 [[COND:%.*]], float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]], ptr addrspace(1) nocapture readnone [[OTHER:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract float @_Z6sincosfPU3AS5f(float [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -685,7 +685,7 @@ declare void @func(ptr addrspace(1))
define void @sincos_f32_value_is_instr(ptr addrspace(1) %value.ptr, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f32_value_is_instr
-; CHECK-SAME: (ptr addrspace(1) [[VALUE_PTR:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (ptr addrspace(1) [[VALUE_PTR:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: tail call void @func(ptr addrspace(1) [[VALUE_PTR]])
@@ -708,7 +708,7 @@ entry:
define void @sincos_f32_value_is_same_constexpr(ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f32_value_is_same_constexpr
-; CHECK-SAME: (ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract float @_Z6sincosfPU3AS5f(float bitcast (i32 ptrtoint (ptr @func to i32) to float), ptr addrspace(5) [[__SINCOS_]])
@@ -727,7 +727,7 @@ entry:
define void @sincos_f32_value_is_different_constexpr(ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f32_value_is_different_constexpr
-; CHECK-SAME: (ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call contract float @_Z3sinf(float bitcast (i32 ptrtoint (ptr @func to i32) to float))
; CHECK-NEXT: store float [[CALL]], ptr addrspace(1) [[SIN_OUT]], align 4
@@ -745,7 +745,7 @@ entry:
define void @sincos_f32_value_is_same_constantfp(ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f32_value_is_same_constantfp
-; CHECK-SAME: (ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract float @_Z6sincosfPU3AS5f(float 4.200000e+01, ptr addrspace(5) [[__SINCOS_]])
@@ -764,7 +764,7 @@ entry:
define void @sincos_f32_value_is_different_constantfp(ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f32_value_is_different_constantfp
-; CHECK-SAME: (ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CHECK-SAME: (ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call contract float @_Z3sinf(float 4.200000e+01)
; CHECK-NEXT: store float [[CALL]], ptr addrspace(1) [[SIN_OUT]], align 4
@@ -782,7 +782,7 @@ entry:
define void @sincos_f32_different_args(float %x, float %y, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f32_different_args
-; CHECK-SAME: (float [[X:%.*]], float [[Y:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CHECK-SAME: (float [[X:%.*]], float [[Y:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call contract float @_Z3sinf(float [[X]])
; CHECK-NEXT: store float [[CALL]], ptr addrspace(1) [[SIN_OUT]], align 4
@@ -800,7 +800,7 @@ entry:
define void @sincos_f32_flag_intersect0(float %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f32_flag_intersect0
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract float @_Z6sincosfPU3AS5f(float [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -819,7 +819,7 @@ entry:
define void @sincos_f32_flag_intersect1(float %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f32_flag_intersect1
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call nnan contract float @_Z6sincosfPU3AS5f(float [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -838,7 +838,7 @@ entry:
define void @sincos_v2f32_flag_intersect1(<2 x float> %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_v2f32_flag_intersect1
-; CHECK-SAME: (<2 x float> [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (<2 x float> [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 8)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca <2 x float>, align 8, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call nnan contract <2 x float> @_Z6sincosDv2_fPU3AS5S_(<2 x float> [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -947,7 +947,7 @@ entry:
define void @sin_f32_indirect_call_user(float %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out, ptr %func.ptr) {
; CHECK-LABEL: define void @sin_f32_indirect_call_user
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]], ptr nocapture readonly [[FUNC_PTR:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]], ptr nocapture readonly [[FUNC_PTR:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call contract float @_Z3sinf(float [[X]])
; CHECK-NEXT: store float [[CALL]], ptr addrspace(1) [[SIN_OUT]], align 4
@@ -965,7 +965,7 @@ entry:
define void @cos_f32_indirect_call_user(float %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out, ptr %func.ptr) {
; CHECK-LABEL: define void @cos_f32_indirect_call_user
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]], ptr nocapture readonly [[FUNC_PTR:%.*]]) local_unnamed_addr #[[ATTR4]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]], ptr nocapture readonly [[FUNC_PTR:%.*]]) local_unnamed_addr #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call contract float @_Z3cosf(float [[X]])
; CHECK-NEXT: store float [[CALL]], ptr addrspace(1) [[COS_OUT]], align 4
@@ -983,7 +983,7 @@ entry:
define void @sincos_f32_preserve_fpmath_0(float %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f32_preserve_fpmath_0
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract float @_Z6sincosfPU3AS5f(float [[X]], ptr addrspace(5) [[__SINCOS_]]), !fpmath [[META5:![0-9]+]]
@@ -1002,7 +1002,7 @@ entry:
define void @sincos_f32_preserve_fpmath_1(float %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f32_preserve_fpmath_1
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract float @_Z6sincosfPU3AS5f(float [[X]], ptr addrspace(5) [[__SINCOS_]]), !fpmath [[META6:![0-9]+]]
@@ -1022,7 +1022,7 @@ entry:
; Should drop the metadata
define void @sincos_f32_drop_fpmath(float %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) {
; CHECK-LABEL: define void @sincos_f32_drop_fpmath
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[TMP0:%.*]] = call contract float @_Z6sincosfPU3AS5f(float [[X]], ptr addrspace(5) [[__SINCOS_]])
@@ -1041,7 +1041,7 @@ entry:
define void @sincos_f32_debuginfo(float %x, ptr addrspace(1) nocapture writeonly %sin_out, ptr addrspace(1) nocapture writeonly %cos_out) !dbg !15 {
; CHECK-LABEL: define void @sincos_f32_debuginfo
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] !dbg [[DBG7:![0-9]+]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] !dbg [[DBG7:![0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5), !dbg [[DBG14:![0-9]+]]
; CHECK-NEXT: [[TMP0:%.*]] = call contract float @_Z6sincosfPU3AS5f(float [[X]], ptr addrspace(5) [[__SINCOS_]]), !dbg [[DBG14]]
@@ -1064,7 +1064,7 @@ entry:
define float @sin_sincos_private_f32(float %x, ptr addrspace(1) %sin_out, ptr addrspace(1) %cos_out) {
; CHECK-LABEL: define float @sin_sincos_private_f32
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[COS_TMP:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[SIN0:%.*]] = tail call nnan ninf nsz contract float @_Z3sinf(float [[X]]), !fpmath [[META5]]
@@ -1086,7 +1086,7 @@ entry:
define float @sin_sincos_generic_f32(float %x, ptr addrspace(1) %sin_out, ptr addrspace(1) %cos_out) {
; CHECK-LABEL: define float @sin_sincos_generic_f32
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[COS_TMP:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[SIN0:%.*]] = tail call nsz contract float @_Z3sinf(float [[X]]), !fpmath [[META5]]
@@ -1110,7 +1110,7 @@ entry:
define float @cos_sincos_private_f32(float %x, ptr addrspace(1) %sin_out, ptr addrspace(1) %cos_out) {
; CHECK-LABEL: define float @cos_sincos_private_f32
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture readnone [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture readnone [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[COS_TMP:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[COS0:%.*]] = tail call contract float @_Z3cosf(float [[X]])
@@ -1132,7 +1132,7 @@ entry:
define float @cos_sincos_generic_f32(float %x, ptr addrspace(1) %sin_out, ptr addrspace(1) %cos_out) {
; CHECK-LABEL: define float @cos_sincos_generic_f32
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture readnone [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture readnone [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[COS_TMP:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[COS0:%.*]] = tail call contract float @_Z3cosf(float [[X]])
@@ -1156,7 +1156,7 @@ entry:
define float @sincos_private_f32_x2(float %x, ptr addrspace(1) %sin_out, ptr addrspace(1) %cos_out) {
; CHECK-LABEL: define float @sincos_private_f32_x2
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture readnone [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture readnone [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[COS_TMP0:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[COS_TMP1:%.*]] = alloca float, align 4, addrspace(5)
@@ -1184,7 +1184,7 @@ entry:
define float @sincos_generic_f32_x2(float %x, ptr addrspace(1) %sin_out, ptr addrspace(1) %cos_out) {
; CHECK-LABEL: define float @sincos_generic_f32_x2
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture readnone [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture readnone [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[COS_TMP0:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[COS_TMP1:%.*]] = alloca float, align 4, addrspace(5)
@@ -1213,7 +1213,7 @@ entry:
define float @sincos_generic_private_f32(float %x, ptr addrspace(1) %sin_out, ptr addrspace(1) %cos_out) {
; CHECK-LABEL: define float @sincos_generic_private_f32
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture readnone [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture readnone [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[COS_TMP0:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[COS_TMP1:%.*]] = alloca float, align 4, addrspace(5)
@@ -1240,7 +1240,7 @@ entry:
define float @sincos_mixed_sin_cos_generic_private_f32(float %x, ptr addrspace(1) %sin_out, ptr addrspace(1) %cos_out) {
; CHECK-LABEL: define float @sincos_mixed_sin_cos_generic_private_f32
-; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CHECK-SAME: (float [[X:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[SIN_OUT:%.*]], ptr addrspace(1) nocapture writeonly initializes((0, 4)) [[COS_OUT:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[COS_TMP0:%.*]] = alloca float, align 4, addrspace(5)
; CHECK-NEXT: [[__SINCOS_:%.*]] = alloca float, align 4, addrspace(5)
diff --git a/llvm/test/CodeGen/BPF/preserve-static-offset/store-zero.ll b/llvm/test/CodeGen/BPF/preserve-static-offset/store-zero.ll
index 7f2a06af8d10f9..d3929a3706ba85 100644
--- a/llvm/test/CodeGen/BPF/preserve-static-offset/store-zero.ll
+++ b/llvm/test/CodeGen/BPF/preserve-static-offset/store-zero.ll
@@ -28,7 +28,7 @@ entry:
ret void
}
-; CHECK: define dso_local void @bar(ptr nocapture noundef writeonly %[[p:.*]])
+; CHECK: define dso_local void @bar(ptr nocapture noundef writeonly initializes((0, 4)) %[[p:.*]])
; CHECK-NEXT: entry:
; CHECK-NEXT: store i32 0, ptr %[[p]], align 4, !tbaa
; CHECK-NEXT: ret void
diff --git a/llvm/test/Other/optimize-inrange-gep.ll b/llvm/test/Other/optimize-inrange-gep.ll
index e7465fddd80f0c..66cf7f2c17f98e 100644
--- a/llvm/test/Other/optimize-inrange-gep.ll
+++ b/llvm/test/Other/optimize-inrange-gep.ll
@@ -19,7 +19,7 @@ define void @foo(ptr %p) {
; O0-NEXT: ret void
;
; CHECK-LABEL: define void @foo(
-; CHECK-SAME: ptr nocapture writeonly [[P:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: ptr nocapture writeonly initializes((0, 8)) [[P:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: store ptr getelementptr inbounds inrange(-24, 0) (i8, ptr @vtable, i64 24), ptr [[P]], align 8
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/Coroutines/coro-async.ll b/llvm/test/Transforms/Coroutines/coro-async.ll
index 3740c3d1d83871..f02d0a242dc992 100644
--- a/llvm/test/Transforms/Coroutines/coro-async.ll
+++ b/llvm/test/Transforms/Coroutines/coro-async.ll
@@ -116,7 +116,7 @@ define void @my_async_function_pa(ptr %ctxt, ptr %task, ptr %actor) {
; CHECK: @my_async_function_pa_fp = constant <{ i32, i32 }> <{ {{.*}}, i32 176 }
; CHECK: @my_async_function2_fp = constant <{ i32, i32 }> <{ {{.*}}, i32 176 }
-; CHECK-LABEL: define swiftcc void @my_async_function(ptr swiftasync %async.ctxt, ptr %task, ptr %actor)
+; CHECK-LABEL: define swiftcc void @my_async_function(ptr swiftasync initializes((152, 160)) %async.ctxt, ptr %task, ptr %actor)
; CHECK-O0-LABEL: define swiftcc void @my_async_function(ptr swiftasync %async.ctxt, ptr %task, ptr %actor)
; CHECK-SAME: !dbg ![[SP1:[0-9]+]] {
; CHECK: coro.return:
@@ -249,7 +249,7 @@ define swiftcc void @top_level_caller(ptr %ctxt, ptr %task, ptr %actor) {
ret void
}
-; CHECK-LABEL: define swiftcc void @top_level_caller(ptr %ctxt, ptr %task, ptr %actor)
+; CHECK-LABEL: define swiftcc void @top_level_caller(ptr initializes((152, 160)) %ctxt, ptr %task, ptr %actor)
; CHECK: store ptr @my_async_functionTQ0_
; CHECK: store ptr %ctxt
; CHECK: tail call swiftcc void @asyncSuspend
@@ -410,7 +410,7 @@ entry:
unreachable
}
-; CHECK-LABEL: define swiftcc void @polymorphic_suspend_return(ptr swiftasync %async.ctxt, ptr %task, ptr %actor)
+; CHECK-LABEL: define swiftcc void @polymorphic_suspend_return(ptr swiftasync initializes((152, 160)) %async.ctxt, ptr %task, ptr %actor)
; CHECK-LABEL: define internal swiftcc void @polymorphic_suspend_return.resume.0(ptr {{.*}}swiftasync{{.*}} %0, ptr {{.*}}swiftself{{.*}} %1, ptr {{.*}}%2, ptr {{.*}}%3)
; CHECK: }
diff --git a/llvm/test/Transforms/FunctionAttrs/argmemonly.ll b/llvm/test/Transforms/FunctionAttrs/argmemonly.ll
index 10760e3b8b8b81..5bbe6fa7c27c2e 100644
--- a/llvm/test/Transforms/FunctionAttrs/argmemonly.ll
+++ b/llvm/test/Transforms/FunctionAttrs/argmemonly.ll
@@ -101,7 +101,7 @@ entry:
define void @test_only_write_arg(ptr %ptr) {
; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
; FNATTRS-LABEL: define void @test_only_write_arg
-; FNATTRS-SAME: (ptr nocapture writeonly [[PTR:%.*]]) #[[ATTR4:[0-9]+]] {
+; FNATTRS-SAME: (ptr nocapture writeonly initializes((0, 4)) [[PTR:%.*]]) #[[ATTR4:[0-9]+]] {
; FNATTRS-NEXT: entry:
; FNATTRS-NEXT: store i32 0, ptr [[PTR]], align 4
; FNATTRS-NEXT: ret void
@@ -156,7 +156,7 @@ declare i32 @fn_readnone() readnone
define void @test_call_readnone(ptr %ptr) {
; FNATTRS: Function Attrs: memory(argmem: write)
; FNATTRS-LABEL: define void @test_call_readnone
-; FNATTRS-SAME: (ptr nocapture writeonly [[PTR:%.*]]) #[[ATTR7:[0-9]+]] {
+; FNATTRS-SAME: (ptr nocapture writeonly initializes((0, 4)) [[PTR:%.*]]) #[[ATTR7:[0-9]+]] {
; FNATTRS-NEXT: entry:
; FNATTRS-NEXT: [[C:%.*]] = call i32 @fn_readnone()
; FNATTRS-NEXT: store i32 [[C]], ptr [[PTR]], align 4
@@ -221,7 +221,7 @@ entry:
define void @test_memcpy_argonly(ptr %dst, ptr %src) {
; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
; FNATTRS-LABEL: define void @test_memcpy_argonly
-; FNATTRS-SAME: (ptr nocapture writeonly [[DST:%.*]], ptr nocapture readonly [[SRC:%.*]]) #[[ATTR9:[0-9]+]] {
+; FNATTRS-SAME: (ptr nocapture writeonly initializes((0, 32)) [[DST:%.*]], ptr nocapture readonly [[SRC:%.*]]) #[[ATTR9:[0-9]+]] {
; FNATTRS-NEXT: entry:
; FNATTRS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DST]], ptr [[SRC]], i64 32, i1 false)
; FNATTRS-NEXT: ret void
@@ -245,7 +245,7 @@ declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
define void @test_memcpy_src_global(ptr %dst) {
; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(readwrite, inaccessiblemem: none)
; FNATTRS-LABEL: define void @test_memcpy_src_global
-; FNATTRS-SAME: (ptr nocapture writeonly [[DST:%.*]]) #[[ATTR11:[0-9]+]] {
+; FNATTRS-SAME: (ptr nocapture writeonly initializes((0, 32)) [[DST:%.*]]) #[[ATTR11:[0-9]+]] {
; FNATTRS-NEXT: entry:
; FNATTRS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DST]], ptr @arr, i64 32, i1 false)
; FNATTRS-NEXT: ret void
@@ -370,7 +370,7 @@ define void @test_inaccessibleorargmemonly_readonly(ptr %arg) {
define void @test_inaccessibleorargmemonly_readwrite(ptr %arg) {
; FNATTRS: Function Attrs: memory(argmem: write, inaccessiblemem: read)
; FNATTRS-LABEL: define void @test_inaccessibleorargmemonly_readwrite
-; FNATTRS-SAME: (ptr nocapture writeonly [[ARG:%.*]]) #[[ATTR15:[0-9]+]] {
+; FNATTRS-SAME: (ptr nocapture writeonly initializes((0, 4)) [[ARG:%.*]]) #[[ATTR15:[0-9]+]] {
; FNATTRS-NEXT: store i32 0, ptr [[ARG]], align 4
; FNATTRS-NEXT: call void @fn_inaccessiblememonly() #[[ATTR19]]
; FNATTRS-NEXT: ret void
diff --git a/llvm/test/Transforms/FunctionAttrs/initializes.ll b/llvm/test/Transforms/FunctionAttrs/initializes.ll
new file mode 100644
index 00000000000000..2aa8385fe4ca7b
--- /dev/null
+++ b/llvm/test/Transforms/FunctionAttrs/initializes.ll
@@ -0,0 +1,572 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --version 4
+; RUN: opt -passes=function-attrs -S < %s | FileCheck %s
+
+define void @basic(ptr %p) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @basic(
+; CHECK-SAME: ptr nocapture writeonly initializes((0, 8)) [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: store i64 123, ptr [[P]], align 4
+; CHECK-NEXT: ret void
+;
+ store i64 123, ptr %p
+ ret void
+}
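+
+; Note: as these tests exercise it, initializes((lo, hi)) records the
+; half-open byte range [lo, hi) of the pointer argument that the function
+; writes before any read of it, on every path to a return. The (0, 8) above
+; covers the full 8-byte i64 store; offsets and merged ranges are covered by
+; the tests below.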
+
+define void @stores_on_both_paths(ptr %p, i1 %i) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @stores_on_both_paths(
+; CHECK-SAME: ptr nocapture writeonly initializes((0, 8)) [[P:%.*]], i1 [[I:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[I]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: store i64 123, ptr [[P]], align 4
+; CHECK-NEXT: br label [[END:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: store i64 321, ptr [[P]], align 4
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+ br i1 %i, label %bb1, label %bb2
+bb1:
+ store i64 123, ptr %p
+ br label %end
+bb2:
+ store i64 321, ptr %p
+ br label %end
+end:
+ ret void
+}
+
+define void @store_pointer_to_pointer(ptr %p, ptr %p2) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @store_pointer_to_pointer(
+; CHECK-SAME: ptr [[P:%.*]], ptr nocapture writeonly initializes((0, 8)) [[P2:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: store ptr [[P]], ptr [[P2]], align 8
+; CHECK-NEXT: ret void
+;
+ store ptr %p, ptr %p2
+ ret void
+}
+
+; TODO: this should still be inferred as initializes((0, 8))
+define void @store_pointer_to_itself(ptr %p) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @store_pointer_to_itself(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: store ptr [[P]], ptr [[P]], align 8
+; CHECK-NEXT: ret void
+;
+ store ptr %p, ptr %p
+ ret void
+}
+
+define void @load_before_store(ptr %p) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
+; CHECK-LABEL: define void @load_before_store(
+; CHECK-SAME: ptr nocapture [[P:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: store i32 123, ptr [[P]], align 4
+; CHECK-NEXT: ret void
+;
+ %a = load i32, ptr %p
+ store i32 123, ptr %p
+ ret void
+}
+
+define void @partial_load_before_store(ptr %p) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
+; CHECK-LABEL: define void @partial_load_before_store(
+; CHECK-SAME: ptr nocapture initializes((4, 8)) [[P:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: store i64 123, ptr [[P]], align 4
+; CHECK-NEXT: ret void
+;
+ %a = load i32, ptr %p
+ store i64 123, ptr %p
+ ret void
+}
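+
+; Note: the two tests above illustrate the read-before-write rule: in
+; @load_before_store the i32 load covers the start of the later store, so no
+; initializes is inferred, while in @partial_load_before_store only bytes
+; (4, 8) of the i64 store are untouched by the prior i32 load, so only that
+; subrange is inferred.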
+
+declare void @use(ptr)
+
+define void @call_clobber(ptr %p) {
+; CHECK-LABEL: define void @call_clobber(
+; CHECK-SAME: ptr [[P:%.*]]) {
+; CHECK-NEXT: call void @use(ptr [[P]])
+; CHECK-NEXT: store i64 123, ptr [[P]], align 4
+; CHECK-NEXT: ret void
+;
+ call void @use(ptr %p)
+ store i64 123, ptr %p
+ ret void
+}
+
+define void @call_clobber_after_store(ptr %p) {
+; CHECK-LABEL: define void @call_clobber_after_store(
+; CHECK-SAME: ptr initializes((0, 8)) [[P:%.*]]) {
+; CHECK-NEXT: store i64 123, ptr [[P]], align 4
+; CHECK-NEXT: call void @use(ptr [[P]])
+; CHECK-NEXT: ret void
+;
+ store i64 123, ptr %p
+ call void @use(ptr %p)
+ ret void
+}
+
+define void @store_offset(ptr %p) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @store_offset(
+; CHECK-SAME: ptr nocapture writeonly initializes((8, 12)) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[G:%.*]] = getelementptr i8, ptr [[P]], i64 8
+; CHECK-NEXT: store i32 123, ptr [[G]], align 4
+; CHECK-NEXT: ret void
+;
+ %g = getelementptr i8, ptr %p, i64 8
+ store i32 123, ptr %g
+ ret void
+}
+
+define void @store_volatile(ptr %p) {
+; CHECK: Function Attrs: nofree norecurse nounwind memory(argmem: readwrite, inaccessiblemem: readwrite)
+; CHECK-LABEL: define void @store_volatile(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR2:[0-9]+]] {
+; CHECK-NEXT: [[G:%.*]] = getelementptr i8, ptr [[P]], i64 8
+; CHECK-NEXT: store volatile i32 123, ptr [[G]], align 4
+; CHECK-NEXT: ret void
+;
+ %g = getelementptr i8, ptr %p, i64 8
+ store volatile i32 123, ptr %g
+ ret void
+}
+
+define void @merge_store_ranges(ptr %p) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @merge_store_ranges(
+; CHECK-SAME: ptr nocapture writeonly initializes((0, 8)) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[G:%.*]] = getelementptr i8, ptr [[P]], i64 4
+; CHECK-NEXT: store i32 123, ptr [[G]], align 4
+; CHECK-NEXT: store i32 123, ptr [[P]], align 4
+; CHECK-NEXT: ret void
+;
+ %g = getelementptr i8, ptr %p, i64 4
+ store i32 123, ptr %g
+ store i32 123, ptr %p
+ ret void
+}
+
+define void @partially_overlapping_stores_branches(ptr %p, i1 %i) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
+; CHECK-LABEL: define void @partially_overlapping_stores_branches(
+; CHECK-SAME: ptr nocapture initializes((4, 8)) [[P:%.*]], i1 [[I:%.*]]) #[[ATTR3:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[P]]
+; CHECK-NEXT: [[G:%.*]] = getelementptr i8, ptr [[P]], i64 4
+; CHECK-NEXT: br i1 [[I]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: store i64 123, ptr [[G]], align 4
+; CHECK-NEXT: br label [[END:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: store i64 321, ptr [[P]], align 4
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+ %a = load i32, ptr %p
+ %g = getelementptr i8, ptr %p, i64 4
+ br i1 %i, label %bb1, label %bb2
+bb1:
+ store i64 123, ptr %g
+ br label %end
+bb2:
+ store i64 321, ptr %p
+ br label %end
+end:
+ ret void
+}
+
+define void @non_overlapping_stores_branches(ptr %p, i1 %i) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @non_overlapping_stores_branches(
+; CHECK-SAME: ptr nocapture writeonly [[P:%.*]], i1 [[I:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[G:%.*]] = getelementptr i8, ptr [[P]], i64 8
+; CHECK-NEXT: br i1 [[I]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: store i64 123, ptr [[G]], align 4
+; CHECK-NEXT: br label [[END:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: store i64 321, ptr [[P]], align 4
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+ %g = getelementptr i8, ptr %p, i64 8
+ br i1 %i, label %bb1, label %bb2
+bb1:
+ store i64 123, ptr %g
+ br label %end
+bb2:
+ store i64 321, ptr %p
+ br label %end
+end:
+ ret void
+}
+
+define void @dominating_store(ptr %p, i1 %i) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @dominating_store(
+; CHECK-SAME: ptr nocapture writeonly initializes((0, 8)) [[P:%.*]], i1 [[I:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[I]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[END:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: store i64 321, ptr [[P]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ br i1 %i, label %bb1, label %bb2
+bb1:
+ br label %end
+bb2:
+ br label %end
+end:
+ store i64 321, ptr %p
+ ret void
+}
+
+define void @call_clobber_on_one_branch(ptr %p, i1 %i) {
+; CHECK-LABEL: define void @call_clobber_on_one_branch(
+; CHECK-SAME: ptr [[P:%.*]], i1 [[I:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[I]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[END:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: call void @use(ptr [[P]])
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: store i64 321, ptr [[P]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ br i1 %i, label %bb1, label %bb2
+bb1:
+ br label %end
+bb2:
+ call void @use(ptr %p)
+ br label %end
+end:
+ store i64 321, ptr %p
+ ret void
+}
+
+define void @merge_existing_initializes(ptr initializes((33, 36)) %p) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @merge_existing_initializes(
+; CHECK-SAME: ptr nocapture writeonly initializes((0, 8), (33, 36)) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: store i64 123, ptr [[P]], align 4
+; CHECK-NEXT: ret void
+;
+ store i64 123, ptr %p
+ ret void
+}
+
+define void @negative_offset(ptr %p) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @negative_offset(
+; CHECK-SAME: ptr nocapture writeonly initializes((-5, 3)) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[G:%.*]] = getelementptr i8, ptr [[P]], i64 -5
+; CHECK-NEXT: store i64 123, ptr [[G]], align 4
+; CHECK-NEXT: ret void
+;
+ %g = getelementptr i8, ptr %p, i64 -5
+ store i64 123, ptr %g
+ ret void
+}
+
+define void @non_const_gep(ptr %p, i64 %i) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @non_const_gep(
+; CHECK-SAME: ptr nocapture writeonly initializes((0, 8)) [[P:%.*]], i64 [[I:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[G:%.*]] = getelementptr i8, ptr [[P]], i64 [[I]]
+; CHECK-NEXT: store i64 123, ptr [[G]], align 4
+; CHECK-NEXT: store i64 123, ptr [[P]], align 4
+; CHECK-NEXT: ret void
+;
+ %g = getelementptr i8, ptr %p, i64 %i
+ store i64 123, ptr %g
+ store i64 123, ptr %p
+ ret void
+}
+
+define void @call_clobber_in_entry_block(ptr %p, i1 %i) {
+; CHECK-LABEL: define void @call_clobber_in_entry_block(
+; CHECK-SAME: ptr [[P:%.*]], i1 [[I:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @use(ptr [[P]])
+; CHECK-NEXT: br i1 [[I]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: store i64 123, ptr [[P]], align 4
+; CHECK-NEXT: br label [[END:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: store i64 321, ptr [[P]], align 4
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+ call void @use(ptr %p)
+ br i1 %i, label %bb1, label %bb2
+bb1:
+ store i64 123, ptr %p
+ br label %end
+bb2:
+ store i64 321, ptr %p
+ br label %end
+end:
+ ret void
+}
+
+declare void @g1(ptr initializes((0, 4)) %p)
+declare void @g2(ptr initializes((8, 12)) %p)
+declare void @g3(ptr initializes((0, 4)) writeonly nocapture %p)
+
+define void @call_initializes(ptr %p) {
+; CHECK-LABEL: define void @call_initializes(
+; CHECK-SAME: ptr initializes((0, 4)) [[P:%.*]]) {
+; CHECK-NEXT: call void @g1(ptr [[P]])
+; CHECK-NEXT: ret void
+;
+ call void @g1(ptr %p)
+ ret void
+}
+
+define void @call_initializes_clobber(ptr %p) {
+; CHECK-LABEL: define void @call_initializes_clobber(
+; CHECK-SAME: ptr initializes((0, 4)) [[P:%.*]]) {
+; CHECK-NEXT: call void @g1(ptr [[P]])
+; CHECK-NEXT: call void @g2(ptr [[P]])
+; CHECK-NEXT: ret void
+;
+ call void @g1(ptr %p)
+ call void @g2(ptr %p)
+ ret void
+}
+
+define void @call_initializes_no_clobber_writeonly_nocapture(ptr %p) {
+; CHECK-LABEL: define void @call_initializes_no_clobber_writeonly_nocapture(
+; CHECK-SAME: ptr initializes((0, 4), (8, 12)) [[P:%.*]]) {
+; CHECK-NEXT: call void @g3(ptr [[P]])
+; CHECK-NEXT: call void @g2(ptr [[P]])
+; CHECK-NEXT: ret void
+;
+ call void @g3(ptr %p)
+ call void @g2(ptr %p)
+ ret void
+}
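+
+; Note: the call tests above show initializes propagating from callee
+; declarations. @g1's (0, 4) transfers to its caller directly. In
+; @call_initializes_clobber, @g1 may read the tail of %p before @g2 writes
+; it, so @g2's (8, 12) is dropped; with the writeonly nocapture callee @g3,
+; no earlier read of %p is possible, so both callees' ranges merge.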
+
+define void @call_initializes_escape_bundle(ptr %p) {
+; CHECK-LABEL: define void @call_initializes_escape_bundle(
+; CHECK-SAME: ptr [[P:%.*]]) {
+; CHECK-NEXT: call void @g1(ptr [[P]]) [ "unknown"(ptr [[P]]) ]
+; CHECK-NEXT: ret void
+;
+ call void @g1(ptr %p) ["unknown"(ptr %p)]
+ ret void
+}
+
+define void @access_bundle() {
+ %sink = alloca i64, align 8
+ store i64 123, ptr %sink
+ ret void
+}
+
+define void @call_operand_bundle(ptr %p) {
+; CHECK-LABEL: define void @call_operand_bundle(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR4:[0-9]+]] {
+; CHECK-NEXT: call void @access_bundle() [ "unknown"(ptr [[P]]) ]
+; CHECK-NEXT: ret void
+;
+ call void @access_bundle() ["unknown"(ptr %p)]
+ ret void
+}
+
+declare void @llvm.memset(ptr, i8, i64, i1)
+
+define void @memset(ptr %p) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @memset(
+; CHECK-SAME: ptr nocapture writeonly initializes((0, 9)) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[P]], i8 2, i64 9, i1 false)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memset(ptr %p, i8 2, i64 9, i1 false)
+ ret void
+}
+
+define void @memset_offset(ptr %p) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @memset_offset(
+; CHECK-SAME: ptr nocapture writeonly initializes((3, 12)) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[G:%.*]] = getelementptr i8, ptr [[P]], i64 3
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[G]], i8 2, i64 9, i1 false)
+; CHECK-NEXT: ret void
+;
+ %g = getelementptr i8, ptr %p, i64 3
+ call void @llvm.memset(ptr %g, i8 2, i64 9, i1 false)
+ ret void
+}
+
+define void @memset_volatile(ptr %p) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @memset_volatile(
+; CHECK-SAME: ptr writeonly [[P:%.*]]) #[[ATTR3:[0-9]+]] {
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[P]], i8 2, i64 9, i1 true)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memset(ptr %p, i8 2, i64 9, i1 true)
+ ret void
+}
+
+define void @memset_non_constant(ptr %p, i64 %i) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @memset_non_constant(
+; CHECK-SAME: ptr nocapture writeonly [[P:%.*]], i64 [[I:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[P]], i8 2, i64 [[I]], i1 false)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memset(ptr %p, i8 2, i64 %i, i1 false)
+ ret void
+}
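+
+; Note: for memset a constant length yields a concrete range ((0, 9) above,
+; shifted to (3, 12) by the GEP in @memset_offset), while volatile and
+; variable-length calls get no initializes; the memcpy and memmove tests
+; below follow the same pattern.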
+
+declare void @llvm.memcpy(ptr, ptr, i64, i1)
+
+define void @memcpy(ptr %p, ptr %p2) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
+; CHECK-LABEL: define void @memcpy(
+; CHECK-SAME: ptr nocapture writeonly initializes((0, 9)) [[P:%.*]], ptr nocapture readonly [[P2:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[P]], ptr [[P2]], i64 9, i1 false)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memcpy(ptr %p, ptr %p2, i64 9, i1 false)
+ ret void
+}
+
+define void @memcpy_volatile(ptr %p, ptr %p2) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; CHECK-LABEL: define void @memcpy_volatile(
+; CHECK-SAME: ptr writeonly [[P:%.*]], ptr readonly [[P2:%.*]]) #[[ATTR4:[0-9]+]] {
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[P]], ptr [[P2]], i64 9, i1 true)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memcpy(ptr %p, ptr %p2, i64 9, i1 true)
+ ret void
+}
+
+define void @memcpy_offset(ptr %p, ptr %p2) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
+; CHECK-LABEL: define void @memcpy_offset(
+; CHECK-SAME: ptr nocapture writeonly initializes((3, 12)) [[P:%.*]], ptr nocapture readonly [[P2:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[G:%.*]] = getelementptr i8, ptr [[P]], i64 3
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[G]], ptr [[P2]], i64 9, i1 false)
+; CHECK-NEXT: ret void
+;
+ %g = getelementptr i8, ptr %p, i64 3
+ call void @llvm.memcpy(ptr %g, ptr %p2, i64 9, i1 false)
+ ret void
+}
+
+define void @memcpy_src(ptr %p, ptr %p2) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
+; CHECK-LABEL: define void @memcpy_src(
+; CHECK-SAME: ptr nocapture initializes((96, 128)) [[P:%.*]], ptr nocapture initializes((0, 96)) [[P2:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[P2]], ptr [[P]], i64 96, i1 false)
+; CHECK-NEXT: [[G:%.*]] = getelementptr i8, ptr [[P]], i64 64
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[G]], ptr [[P2]], i64 64, i1 false)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memcpy(ptr %p2, ptr %p, i64 96, i1 false)
+ %g = getelementptr i8, ptr %p, i64 64
+ call void @llvm.memcpy(ptr %g, ptr %p2, i64 64, i1 false)
+ ret void
+}
+
+define void @memcpy_non_constant(ptr %p, ptr %p2, i64 %i) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
+; CHECK-LABEL: define void @memcpy_non_constant(
+; CHECK-SAME: ptr nocapture writeonly [[P:%.*]], ptr nocapture readonly [[P2:%.*]], i64 [[I:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[P]], ptr [[P2]], i64 [[I]], i1 false)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memcpy(ptr %p, ptr %p2, i64 %i, i1 false)
+ ret void
+}
+
+declare void @llvm.memmove(ptr, ptr, i64, i1)
+
+define void @memmove(ptr %p, ptr %p2) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
+; CHECK-LABEL: define void @memmove(
+; CHECK-SAME: ptr nocapture writeonly initializes((0, 9)) [[P:%.*]], ptr nocapture readonly [[P2:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[P]], ptr [[P2]], i64 9, i1 false)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memmove(ptr %p, ptr %p2, i64 9, i1 false)
+ ret void
+}
+
+define void @memmove_volatile(ptr %p, ptr %p2) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; CHECK-LABEL: define void @memmove_volatile(
+; CHECK-SAME: ptr writeonly [[P:%.*]], ptr readonly [[P2:%.*]]) #[[ATTR4:[0-9]+]] {
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[P]], ptr [[P2]], i64 9, i1 true)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memmove(ptr %p, ptr %p2, i64 9, i1 true)
+ ret void
+}
+
+define void @memmove_offset(ptr %p, ptr %p2) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
+; CHECK-LABEL: define void @memmove_offset(
+; CHECK-SAME: ptr nocapture writeonly initializes((3, 12)) [[P:%.*]], ptr nocapture readonly [[P2:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[G:%.*]] = getelementptr i8, ptr [[P]], i64 3
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[G]], ptr [[P2]], i64 9, i1 false)
+; CHECK-NEXT: ret void
+;
+ %g = getelementptr i8, ptr %p, i64 3
+ call void @llvm.memmove(ptr %g, ptr %p2, i64 9, i1 false)
+ ret void
+}
+
+define void @memmove_src(ptr %p, ptr %p2) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
+; CHECK-LABEL: define void @memmove_src(
+; CHECK-SAME: ptr nocapture initializes((96, 128)) [[P:%.*]], ptr nocapture initializes((0, 96)) [[P2:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[P2]], ptr [[P]], i64 96, i1 false)
+; CHECK-NEXT: [[G:%.*]] = getelementptr i8, ptr [[P]], i64 64
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[G]], ptr [[P2]], i64 64, i1 false)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memmove(ptr %p2, ptr %p, i64 96, i1 false)
+ %g = getelementptr i8, ptr %p, i64 64
+ call void @llvm.memmove(ptr %g, ptr %p2, i64 64, i1 false)
+ ret void
+}
+
+define void @memmove_non_constant(ptr %p, ptr %p2, i64 %i) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
+; CHECK-LABEL: define void @memmove_non_constant(
+; CHECK-SAME: ptr nocapture writeonly [[P:%.*]], ptr nocapture readonly [[P2:%.*]], i64 [[I:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[P]], ptr [[P2]], i64 [[I]], i1 false)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memmove(ptr %p, ptr %p2, i64 %i, i1 false)
+ ret void
+}
diff --git a/llvm/test/Transforms/FunctionAttrs/readattrs.ll b/llvm/test/Transforms/FunctionAttrs/readattrs.ll
index 39513976f90d76..004c0485d764ae 100644
--- a/llvm/test/Transforms/FunctionAttrs/readattrs.ll
+++ b/llvm/test/Transforms/FunctionAttrs/readattrs.ll
@@ -107,7 +107,7 @@ define void @test4_2(ptr %p) {
define void @test5(ptr %p, ptr %q) {
; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
; FNATTRS-LABEL: define {{[^@]+}}@test5
-; FNATTRS-SAME: (ptr nocapture writeonly [[P:%.*]], ptr [[Q:%.*]]) #[[ATTR4:[0-9]+]] {
+; FNATTRS-SAME: (ptr nocapture writeonly initializes((0, 8)) [[P:%.*]], ptr [[Q:%.*]]) #[[ATTR4:[0-9]+]] {
; FNATTRS-NEXT: store ptr [[Q]], ptr [[P]], align 8
; FNATTRS-NEXT: ret void
;
@@ -132,7 +132,7 @@ declare void @test6_1()
; This is not a missed optz'n.
define void @test6_2(ptr %p, ptr %q) {
; FNATTRS-LABEL: define {{[^@]+}}@test6_2
-; FNATTRS-SAME: (ptr nocapture writeonly [[P:%.*]], ptr [[Q:%.*]]) {
+; FNATTRS-SAME: (ptr nocapture writeonly initializes((0, 8)) [[P:%.*]], ptr [[Q:%.*]]) {
; FNATTRS-NEXT: store ptr [[Q]], ptr [[P]], align 8
; FNATTRS-NEXT: call void @test6_1()
; FNATTRS-NEXT: ret void
diff --git a/llvm/test/Transforms/FunctionAttrs/writeonly.ll b/llvm/test/Transforms/FunctionAttrs/writeonly.ll
index de2d5e22389476..ba546aff6e6211 100644
--- a/llvm/test/Transforms/FunctionAttrs/writeonly.ll
+++ b/llvm/test/Transforms/FunctionAttrs/writeonly.ll
@@ -66,7 +66,7 @@ nouses-argworn-funwo_entry:
define void @test_store(ptr %p) {
; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
; FNATTRS-LABEL: define {{[^@]+}}@test_store
-; FNATTRS-SAME: (ptr nocapture writeonly [[P:%.*]]) #[[ATTR3:[0-9]+]] {
+; FNATTRS-SAME: (ptr nocapture writeonly initializes((0, 1)) [[P:%.*]]) #[[ATTR3:[0-9]+]] {
; FNATTRS-NEXT: store i8 0, ptr [[P]], align 1
; FNATTRS-NEXT: ret void
;
@@ -107,7 +107,7 @@ define i8 @test_store_capture(ptr %p) {
define void @test_addressing(ptr %p) {
; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
; FNATTRS-LABEL: define {{[^@]+}}@test_addressing
-; FNATTRS-SAME: (ptr nocapture writeonly [[P:%.*]]) #[[ATTR3]] {
+; FNATTRS-SAME: (ptr nocapture writeonly initializes((8, 12)) [[P:%.*]]) #[[ATTR3]] {
; FNATTRS-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[P]], i64 8
; FNATTRS-NEXT: store i32 0, ptr [[GEP]], align 4
; FNATTRS-NEXT: ret void
diff --git a/llvm/test/Transforms/PGOProfile/memprof_internal_linkage.ll b/llvm/test/Transforms/PGOProfile/memprof_internal_linkage.ll
index 65717d12bd4811..40cb25c62d7246 100644
--- a/llvm/test/Transforms/PGOProfile/memprof_internal_linkage.ll
+++ b/llvm/test/Transforms/PGOProfile/memprof_internal_linkage.ll
@@ -81,4 +81,4 @@ attributes #5 = { builtin allocsize(0) }
!18 = !DILocation(line: 4, column: 8, scope: !16)
!19 = !DILocation(line: 5, column: 10, scope: !16)
!20 = !DILocation(line: 5, column: 3, scope: !16)
-!21 = !DILocation(line: 6, column: 1, scope: !16)
\ No newline at end of file
+!21 = !DILocation(line: 6, column: 1, scope: !16)
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/unroll-vectorizer.ll b/llvm/test/Transforms/PhaseOrdering/X86/unroll-vectorizer.ll
index 1c9e7a771ca19c..ec0c2b40640f49 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/unroll-vectorizer.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/unroll-vectorizer.ll
@@ -8,7 +8,7 @@ target triple = "x86_64-unknown-linux-gnu"
define void @foo(ptr %a, <32 x i8> %_0) #0 {
; CHECK-LABEL: define void @foo(
-; CHECK-SAME: ptr nocapture writeonly [[A:%.*]], <32 x i8> [[_0:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: ptr nocapture writeonly initializes((0, 32)) [[A:%.*]], <32 x i8> [[_0:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: start:
; CHECK-NEXT: store <32 x i8> [[_0]], ptr [[A]], align 1
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/PhaseOrdering/memcpy-offset.ll b/llvm/test/Transforms/PhaseOrdering/memcpy-offset.ll
index bd910b82496fd1..5e6eab9d807368 100644
--- a/llvm/test/Transforms/PhaseOrdering/memcpy-offset.ll
+++ b/llvm/test/Transforms/PhaseOrdering/memcpy-offset.ll
@@ -10,7 +10,7 @@ define void @memcpy_forward_back_with_offset(ptr %arg) {
; CUSTOM-NEXT: ret void
;
; O2-LABEL: define void @memcpy_forward_back_with_offset(
-; O2-SAME: ptr nocapture writeonly [[ARG:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; O2-SAME: ptr nocapture writeonly initializes((0, 1)) [[ARG:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; O2-NEXT: store i8 1, ptr [[ARG]], align 1
; O2-NEXT: ret void
;
diff --git a/llvm/test/Transforms/PhaseOrdering/pr95152.ll b/llvm/test/Transforms/PhaseOrdering/pr95152.ll
index fff94673a1a519..016460fed7c350 100644
--- a/llvm/test/Transforms/PhaseOrdering/pr95152.ll
+++ b/llvm/test/Transforms/PhaseOrdering/pr95152.ll
@@ -21,7 +21,7 @@ define void @j(ptr %p) optnone noinline {
define void @h(ptr %p) {
; CHECK-LABEL: define void @h(
-; CHECK-SAME: ptr [[P:%.*]]) local_unnamed_addr {
+; CHECK-SAME: ptr initializes((0, 8)) [[P:%.*]]) local_unnamed_addr {
; CHECK-NEXT: store i64 3, ptr [[P]], align 4
; CHECK-NEXT: tail call void @j(ptr nonnull [[P]])
; CHECK-NEXT: ret void
@@ -33,7 +33,7 @@ define void @h(ptr %p) {
define void @g(ptr dead_on_unwind noalias writable dereferenceable(8) align 8 %p) minsize {
; CHECK-LABEL: define void @g(
-; CHECK-SAME: ptr dead_on_unwind noalias nocapture writable writeonly align 8 dereferenceable(8) [[P:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+; CHECK-SAME: ptr dead_on_unwind noalias nocapture writable writeonly align 8 dereferenceable(8) initializes((0, 8)) [[P:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: tail call void @h(ptr nonnull [[P]])
; CHECK-NEXT: ret void
;
@@ -45,7 +45,7 @@ define void @g(ptr dead_on_unwind noalias writable dereferenceable(8) align 8 %p
define void @f(ptr dead_on_unwind noalias %p) {
; CHECK-LABEL: define void @f(
-; CHECK-SAME: ptr dead_on_unwind noalias [[P:%.*]]) local_unnamed_addr {
+; CHECK-SAME: ptr dead_on_unwind noalias initializes((0, 8)) [[P:%.*]]) local_unnamed_addr {
; CHECK-NEXT: store i64 3, ptr [[P]], align 4
; CHECK-NEXT: tail call void @j(ptr nonnull align 8 dereferenceable(8) [[P]])
; CHECK-NEXT: store i64 43, ptr [[P]], align 4
>From 738bdd49694f2722c9b58b6a1bb99eaa6c0ba051 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Mon, 18 Nov 2024 21:50:54 -0800
Subject: [PATCH 07/22] AMDGPU: Add V_CVT_PK_BF16_F32 for gfx950 (#116678)
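A sketch of what this enables (illustrative, not part of the patch): on
gfx950 the new patterns fold a pair of f32-to-bf16 truncations into the
single packing instruction. The IR shape, mirroring the
fptrunc_f32_f32_to_v2bf16 test updated below:

; Hypothetical standalone example; function and value names are made up.
define amdgpu_ps float @pack_two_floats(float %a, float %b) {
  %a.cvt = fptrunc float %a to bfloat   ; low half of the result
  %b.cvt = fptrunc float %b to bfloat   ; high half of the result
  %v0 = insertelement <2 x bfloat> poison, bfloat %a.cvt, i32 0
  %v1 = insertelement <2 x bfloat> %v0, bfloat %b.cvt, i32 1
  %cast = bitcast <2 x bfloat> %v1 to float
  ret float %cast                       ; selects to: v_cvt_pk_bf16_f32 v0, v0, v1
}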
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 6 +
llvm/lib/Target/AMDGPU/SIInstrInfo.td | 1 +
llvm/lib/Target/AMDGPU/VOP3Instructions.td | 25 ++
llvm/test/CodeGen/AMDGPU/bf16-conversions.ll | 395 ++++++++----------
llvm/test/MC/AMDGPU/gfx950_asm_vop3.s | 26 ++
.../Disassembler/AMDGPU/gfx950_dasm_vop3.txt | 19 +
6 files changed, 255 insertions(+), 217 deletions(-)
create mode 100644 llvm/test/MC/AMDGPU/gfx950_asm_vop3.s
create mode 100644 llvm/test/MC/Disassembler/AMDGPU/gfx950_dasm_vop3.txt
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 1e261f4256c93b..ad89812558d25c 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -889,6 +889,12 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::MUL, MVT::i1, Promote);
+ if (Subtarget->hasBF16ConversionInsts()) {
+ setOperationAction(ISD::FP_ROUND, MVT::v2bf16, Legal);
+ setOperationAction(ISD::FP_ROUND, MVT::bf16, Legal);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2bf16, Legal);
+ }
+
setTargetDAGCombine({ISD::ADD,
ISD::UADDO_CARRY,
ISD::SUB,
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 882e147dc231fa..7df9be5c6f7a0b 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -2787,6 +2787,7 @@ def VOP_I32_I32_I32 : VOPProfile <[i32, i32, i32, untyped]>;
def VOP_I32_I32_I32_ARITH : VOPProfile <[i32, i32, i32, untyped], /*EnableClamp=*/1>;
def VOP_V2F16_F32_F32 : VOPProfile <[v2f16, f32, f32, untyped]>;
def VOP_F32_F16_F16_F16 : VOPProfile <[f32, f16, f16, f16]>;
+def VOP_V2BF16_F32_F32 : VOPProfile <[v2bf16, f32, f32, untyped]>;
def VOP_I64_I64_I32 : VOPProfile <[i64, i64, i32, untyped]>;
def VOP_I64_I32_I64 : VOPProfile <[i64, i32, i64, untyped]>;
diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 551e8b3a679202..917e1b3974b46a 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -944,6 +944,30 @@ let SubtargetPredicate = isGFX11Plus in {
defm V_CVT_PK_U16_F32 : VOP3Inst<"v_cvt_pk_u16_f32", VOP3_Profile<VOP_V2I16_F32_F32>>;
} // End SubtargetPredicate = isGFX11Plus
+// FIXME: GlobalISel cannot distinguish f16 and bf16 and may start using bf16 patterns
+// instead of less complex f16. Disable GlobalISel for these for now.
+def bf16_fpround : PatFrag <(ops node:$src0), (fpround $src0), [{ return true; }]> {
+ let GISelPredicateCode = [{return false;}];
+}
+
+let SubtargetPredicate = HasBF16ConversionInsts in {
+ let ReadsModeReg = 0 in {
+ defm V_CVT_PK_BF16_F32 : VOP3Inst<"v_cvt_pk_bf16_f32", VOP3_Profile<VOP_V2BF16_F32_F32>>;
+ }
+ def : GCNPat<(v2bf16 (bf16_fpround v2f32:$src)),
+ (V_CVT_PK_BF16_F32_e64 0, (EXTRACT_SUBREG VReg_64:$src, sub0), 0, (EXTRACT_SUBREG VReg_64:$src, sub1))>;
+ def : GCNPat<(v2bf16 (bf16_fpround v2f64:$src)),
+ (V_CVT_PK_BF16_F32_e64 0, (V_CVT_F32_F64_e64 0, (EXTRACT_SUBREG VReg_128:$src, sub0_sub1)),
+ 0, (V_CVT_F32_F64_e64 0, (EXTRACT_SUBREG VReg_128:$src, sub2_sub3)))>;
+ def : GCNPat<(v2bf16 (build_vector (bf16 (bf16_fpround (f32 (VOP3Mods f32:$src0, i32:$src0_modifiers)))),
+ (bf16 (bf16_fpround (f32 (VOP3Mods f32:$src1, i32:$src1_modifiers)))))),
+ (V_CVT_PK_BF16_F32_e64 $src0_modifiers, $src0, $src1_modifiers, $src1)>;
+ def : GCNPat<(bf16 (bf16_fpround (f32 (VOP3Mods f32:$src0, i32:$src0_modifiers)))),
+ (V_CVT_PK_BF16_F32_e64 $src0_modifiers, $src0, 0, (f32 (IMPLICIT_DEF)))>;
+ def : GCNPat<(bf16 (bf16_fpround (f64 (VOP3Mods f64:$src0, i32:$src0_modifiers)))),
+ (V_CVT_PK_BF16_F32_e64 0, (f32 (V_CVT_F32_F64_e64 $src0_modifiers, $src0)), 0, (f32 (IMPLICIT_DEF)))>;
+}
+
let SubtargetPredicate = isGFX12Plus, ReadsModeReg = 0 in {
defm V_MAXIMUMMINIMUM_F32 : VOP3Inst<"v_maximumminimum_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>;
defm V_MINIMUMMAXIMUM_F32 : VOP3Inst<"v_minimummaximum_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>;
@@ -1721,5 +1745,6 @@ defm V_LSHL_ADD_U64 : VOP3_Real_vi <0x208>;
defm V_CVT_PK_FP8_F32 : VOP3OpSel_Real_gfx9 <0x2a2>;
defm V_CVT_PK_BF8_F32 : VOP3OpSel_Real_gfx9 <0x2a3>;
+defm V_CVT_PK_BF16_F32 : VOP3OpSel_Real_gfx9 <0x268>;
defm V_CVT_SR_FP8_F32 : VOP3OpSel_Real_gfx9_forced_opsel2 <0x2a4>;
defm V_CVT_SR_BF8_F32 : VOP3OpSel_Real_gfx9_forced_opsel2 <0x2a5>;
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
index 425fc5884cec7f..135efceb31fdda 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
@@ -24,139 +24,168 @@ define amdgpu_ps float @v_test_cvt_bf16_f32_s(bfloat inreg %v) {
}
define amdgpu_ps float @v_test_cvt_v2f32_v2bf16_v(<2 x float> %src) {
-; GCN-LABEL: v_test_cvt_v2f32_v2bf16_v:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GCN-NEXT: s_movk_i32 s0, 0x7fff
-; GCN-NEXT: v_add3_u32 v2, v2, v0, s0
-; GCN-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GCN-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GCN-NEXT: s_nop 1
-; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
-; GCN-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GCN-NEXT: v_add3_u32 v2, v2, v1, s0
-; GCN-NEXT: v_or_b32_e32 v3, 0x400000, v1
-; GCN-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GCN-NEXT: s_mov_b32 s0, 0x7060302
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
-; GCN-NEXT: v_perm_b32 v0, v1, v0, s0
-; GCN-NEXT: ; return to shader part epilog
+; GFX-940-LABEL: v_test_cvt_v2f32_v2bf16_v:
+; GFX-940: ; %bb.0:
+; GFX-940-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX-940-NEXT: s_movk_i32 s0, 0x7fff
+; GFX-940-NEXT: v_add3_u32 v2, v2, v0, s0
+; GFX-940-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX-940-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX-940-NEXT: s_nop 1
+; GFX-940-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX-940-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX-940-NEXT: v_add3_u32 v2, v2, v1, s0
+; GFX-940-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX-940-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX-940-NEXT: s_mov_b32 s0, 0x7060302
+; GFX-940-NEXT: s_nop 0
+; GFX-940-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GFX-940-NEXT: v_perm_b32 v0, v1, v0, s0
+; GFX-940-NEXT: ; return to shader part epilog
+;
+; GFX-950-LABEL: v_test_cvt_v2f32_v2bf16_v:
+; GFX-950: ; %bb.0:
+; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX-950-NEXT: ; return to shader part epilog
%res = fptrunc <2 x float> %src to <2 x bfloat>
%cast = bitcast <2 x bfloat> %res to float
ret float %cast
}
define amdgpu_ps float @v_test_cvt_v2f32_v2bf16_s(<2 x float> inreg %src) {
-; GCN-LABEL: v_test_cvt_v2f32_v2bf16_s:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_bfe_u32 s2, s1, 0x10010
-; GCN-NEXT: s_add_i32 s2, s2, s1
-; GCN-NEXT: s_or_b32 s4, s1, 0x400000
-; GCN-NEXT: s_add_i32 s5, s2, 0x7fff
-; GCN-NEXT: v_cmp_u_f32_e64 s[2:3], s1, s1
-; GCN-NEXT: s_and_b64 s[2:3], s[2:3], exec
-; GCN-NEXT: s_cselect_b32 s1, s4, s5
-; GCN-NEXT: s_lshr_b32 s2, s1, 16
-; GCN-NEXT: s_bfe_u32 s1, s0, 0x10010
-; GCN-NEXT: s_add_i32 s1, s1, s0
-; GCN-NEXT: s_or_b32 s3, s0, 0x400000
-; GCN-NEXT: s_add_i32 s4, s1, 0x7fff
-; GCN-NEXT: v_cmp_u_f32_e64 s[0:1], s0, s0
-; GCN-NEXT: s_and_b64 s[0:1], s[0:1], exec
-; GCN-NEXT: s_cselect_b32 s0, s3, s4
-; GCN-NEXT: s_lshr_b32 s0, s0, 16
-; GCN-NEXT: s_pack_ll_b32_b16 s0, s0, s2
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: ; return to shader part epilog
+; GFX-940-LABEL: v_test_cvt_v2f32_v2bf16_s:
+; GFX-940: ; %bb.0:
+; GFX-940-NEXT: s_bfe_u32 s2, s1, 0x10010
+; GFX-940-NEXT: s_add_i32 s2, s2, s1
+; GFX-940-NEXT: s_or_b32 s4, s1, 0x400000
+; GFX-940-NEXT: s_add_i32 s5, s2, 0x7fff
+; GFX-940-NEXT: v_cmp_u_f32_e64 s[2:3], s1, s1
+; GFX-940-NEXT: s_and_b64 s[2:3], s[2:3], exec
+; GFX-940-NEXT: s_cselect_b32 s1, s4, s5
+; GFX-940-NEXT: s_lshr_b32 s2, s1, 16
+; GFX-940-NEXT: s_bfe_u32 s1, s0, 0x10010
+; GFX-940-NEXT: s_add_i32 s1, s1, s0
+; GFX-940-NEXT: s_or_b32 s3, s0, 0x400000
+; GFX-940-NEXT: s_add_i32 s4, s1, 0x7fff
+; GFX-940-NEXT: v_cmp_u_f32_e64 s[0:1], s0, s0
+; GFX-940-NEXT: s_and_b64 s[0:1], s[0:1], exec
+; GFX-940-NEXT: s_cselect_b32 s0, s3, s4
+; GFX-940-NEXT: s_lshr_b32 s0, s0, 16
+; GFX-940-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GFX-940-NEXT: v_mov_b32_e32 v0, s0
+; GFX-940-NEXT: ; return to shader part epilog
+;
+; GFX-950-LABEL: v_test_cvt_v2f32_v2bf16_s:
+; GFX-950: ; %bb.0:
+; GFX-950-NEXT: v_mov_b32_e32 v0, s1
+; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, s0, v0
+; GFX-950-NEXT: ; return to shader part epilog
%res = fptrunc <2 x float> %src to <2 x bfloat>
%cast = bitcast <2 x bfloat> %res to float
ret float %cast
}
define amdgpu_ps float @v_test_cvt_f32_bf16_v(float %src) {
-; GCN-LABEL: v_test_cvt_f32_bf16_v:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GCN-NEXT: s_movk_i32 s0, 0x7fff
-; GCN-NEXT: v_add3_u32 v1, v1, v0, s0
-; GCN-NEXT: v_or_b32_e32 v2, 0x400000, v0
-; GCN-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GCN-NEXT: s_nop 1
-; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
-; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: ; return to shader part epilog
+; GFX-940-LABEL: v_test_cvt_f32_bf16_v:
+; GFX-940: ; %bb.0:
+; GFX-940-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX-940-NEXT: s_movk_i32 s0, 0x7fff
+; GFX-940-NEXT: v_add3_u32 v1, v1, v0, s0
+; GFX-940-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX-940-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX-940-NEXT: s_nop 1
+; GFX-940-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX-940-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX-940-NEXT: ; return to shader part epilog
+;
+; GFX-950-LABEL: v_test_cvt_f32_bf16_v:
+; GFX-950: ; %bb.0:
+; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX-950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX-950-NEXT: ; return to shader part epilog
%trunc = fptrunc float %src to bfloat
%ext = fpext bfloat %trunc to float
ret float %ext
}
define amdgpu_ps float @v_test_cvt_v2f64_v2bf16_v(<2 x double> %src) {
-; GCN-LABEL: v_test_cvt_v2f64_v2bf16_v:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
-; GCN-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
-; GCN-NEXT: v_and_b32_e32 v7, 1, v6
-; GCN-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
-; GCN-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
-; GCN-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
-; GCN-NEXT: v_add_u32_e32 v4, v6, v4
-; GCN-NEXT: s_or_b64 vcc, s[0:1], vcc
-; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
-; GCN-NEXT: s_brev_b32 s4, 1
-; GCN-NEXT: v_and_or_b32 v5, v1, s4, v4
-; GCN-NEXT: v_bfe_u32 v4, v4, 16, 1
-; GCN-NEXT: s_movk_i32 s5, 0x7fff
-; GCN-NEXT: v_add3_u32 v4, v4, v5, s5
-; GCN-NEXT: v_or_b32_e32 v5, 0x400000, v5
-; GCN-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
-; GCN-NEXT: s_nop 1
-; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; GCN-NEXT: v_cvt_f32_f64_e64 v5, |v[2:3]|
-; GCN-NEXT: v_cvt_f64_f32_e32 v[0:1], v5
-; GCN-NEXT: v_and_b32_e32 v6, 1, v5
-; GCN-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[2:3]|, v[0:1]
-; GCN-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[2:3]|, v[0:1]
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v6
-; GCN-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[2:3]
-; GCN-NEXT: v_add_u32_e32 v0, v5, v0
-; GCN-NEXT: s_or_b64 vcc, s[0:1], vcc
-; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc
-; GCN-NEXT: v_and_or_b32 v1, v3, s4, v0
-; GCN-NEXT: v_bfe_u32 v0, v0, 16, 1
-; GCN-NEXT: v_add3_u32 v0, v0, v1, s5
-; GCN-NEXT: v_or_b32_e32 v1, 0x400000, v1
-; GCN-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[2:3]
-; GCN-NEXT: s_mov_b32 s0, 0x7060302
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-NEXT: v_perm_b32 v0, v0, v4, s0
-; GCN-NEXT: ; return to shader part epilog
+; GFX-940-LABEL: v_test_cvt_v2f64_v2bf16_v:
+; GFX-940: ; %bb.0:
+; GFX-940-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GFX-940-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
+; GFX-940-NEXT: v_and_b32_e32 v7, 1, v6
+; GFX-940-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
+; GFX-940-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
+; GFX-940-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
+; GFX-940-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
+; GFX-940-NEXT: v_add_u32_e32 v4, v6, v4
+; GFX-940-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GFX-940-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX-940-NEXT: s_brev_b32 s4, 1
+; GFX-940-NEXT: v_and_or_b32 v5, v1, s4, v4
+; GFX-940-NEXT: v_bfe_u32 v4, v4, 16, 1
+; GFX-940-NEXT: s_movk_i32 s5, 0x7fff
+; GFX-940-NEXT: v_add3_u32 v4, v4, v5, s5
+; GFX-940-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX-940-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GFX-940-NEXT: s_nop 1
+; GFX-940-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; GFX-940-NEXT: v_cvt_f32_f64_e64 v5, |v[2:3]|
+; GFX-940-NEXT: v_cvt_f64_f32_e32 v[0:1], v5
+; GFX-940-NEXT: v_and_b32_e32 v6, 1, v5
+; GFX-940-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[2:3]|, v[0:1]
+; GFX-940-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[2:3]|, v[0:1]
+; GFX-940-NEXT: v_cmp_eq_u32_e32 vcc, 1, v6
+; GFX-940-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[2:3]
+; GFX-940-NEXT: v_add_u32_e32 v0, v5, v0
+; GFX-940-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GFX-940-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc
+; GFX-940-NEXT: v_and_or_b32 v1, v3, s4, v0
+; GFX-940-NEXT: v_bfe_u32 v0, v0, 16, 1
+; GFX-940-NEXT: v_add3_u32 v0, v0, v1, s5
+; GFX-940-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; GFX-940-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[2:3]
+; GFX-940-NEXT: s_mov_b32 s0, 0x7060302
+; GFX-940-NEXT: s_nop 0
+; GFX-940-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX-940-NEXT: v_perm_b32 v0, v0, v4, s0
+; GFX-940-NEXT: ; return to shader part epilog
+;
+; GFX-950-LABEL: v_test_cvt_v2f64_v2bf16_v:
+; GFX-950: ; %bb.0:
+; GFX-950-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX-950-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v2
+; GFX-950-NEXT: ; return to shader part epilog
%res = fptrunc <2 x double> %src to <2 x bfloat>
%cast = bitcast <2 x bfloat> %res to float
ret float %cast
}
define amdgpu_ps float @fptrunc_f32_f32_to_v2bf16(float %a, float %b) {
-; GCN-LABEL: fptrunc_f32_f32_to_v2bf16:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GCN-NEXT: s_movk_i32 s0, 0x7fff
-; GCN-NEXT: v_add3_u32 v2, v2, v0, s0
-; GCN-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GCN-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GCN-NEXT: s_nop 1
-; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
-; GCN-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GCN-NEXT: v_add3_u32 v2, v2, v1, s0
-; GCN-NEXT: v_or_b32_e32 v3, 0x400000, v1
-; GCN-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GCN-NEXT: s_mov_b32 s0, 0x7060302
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
-; GCN-NEXT: v_perm_b32 v0, v1, v0, s0
-; GCN-NEXT: ; return to shader part epilog
+; GFX-940-LABEL: fptrunc_f32_f32_to_v2bf16:
+; GFX-940: ; %bb.0: ; %entry
+; GFX-940-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX-940-NEXT: s_movk_i32 s0, 0x7fff
+; GFX-940-NEXT: v_add3_u32 v2, v2, v0, s0
+; GFX-940-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX-940-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX-940-NEXT: s_nop 1
+; GFX-940-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX-940-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX-940-NEXT: v_add3_u32 v2, v2, v1, s0
+; GFX-940-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX-940-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX-940-NEXT: s_mov_b32 s0, 0x7060302
+; GFX-940-NEXT: s_nop 0
+; GFX-940-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GFX-940-NEXT: v_perm_b32 v0, v1, v0, s0
+; GFX-940-NEXT: ; return to shader part epilog
+;
+; GFX-950-LABEL: fptrunc_f32_f32_to_v2bf16:
+; GFX-950: ; %bb.0: ; %entry
+; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX-950-NEXT: ; return to shader part epilog
entry:
%a.cvt = fptrunc float %a to bfloat
%b.cvt = fptrunc float %b to bfloat
@@ -167,26 +196,31 @@ entry:
}
define amdgpu_ps float @fptrunc_f32_f32_to_v2bf16_mods(float %a, float %b) {
-; GCN-LABEL: fptrunc_f32_f32_to_v2bf16_mods:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
-; GCN-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GCN-NEXT: s_movk_i32 s0, 0x7fff
-; GCN-NEXT: v_add3_u32 v3, v3, v2, s0
-; GCN-NEXT: v_or_b32_e32 v2, 0x400000, v2
-; GCN-NEXT: v_cmp_u_f32_e64 vcc, -v0, -v0
-; GCN-NEXT: s_nop 1
-; GCN-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
-; GCN-NEXT: v_and_b32_e32 v2, 0x7fffffff, v1
-; GCN-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GCN-NEXT: v_add3_u32 v3, v3, v2, s0
-; GCN-NEXT: v_or_b32_e32 v2, 0x400000, v2
-; GCN-NEXT: v_cmp_u_f32_e64 vcc, |v1|, |v1|
-; GCN-NEXT: s_mov_b32 s0, 0x7060302
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
-; GCN-NEXT: v_perm_b32 v0, v1, v0, s0
-; GCN-NEXT: ; return to shader part epilog
+; GFX-940-LABEL: fptrunc_f32_f32_to_v2bf16_mods:
+; GFX-940: ; %bb.0: ; %entry
+; GFX-940-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; GFX-940-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX-940-NEXT: s_movk_i32 s0, 0x7fff
+; GFX-940-NEXT: v_add3_u32 v3, v3, v2, s0
+; GFX-940-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX-940-NEXT: v_cmp_u_f32_e64 vcc, -v0, -v0
+; GFX-940-NEXT: s_nop 1
+; GFX-940-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX-940-NEXT: v_and_b32_e32 v2, 0x7fffffff, v1
+; GFX-940-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX-940-NEXT: v_add3_u32 v3, v3, v2, s0
+; GFX-940-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX-940-NEXT: v_cmp_u_f32_e64 vcc, |v1|, |v1|
+; GFX-940-NEXT: s_mov_b32 s0, 0x7060302
+; GFX-940-NEXT: s_nop 0
+; GFX-940-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GFX-940-NEXT: v_perm_b32 v0, v1, v0, s0
+; GFX-940-NEXT: ; return to shader part epilog
+;
+; GFX-950-LABEL: fptrunc_f32_f32_to_v2bf16_mods:
+; GFX-950: ; %bb.0: ; %entry
+; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, -v0, |v1|
+; GFX-950-NEXT: ; return to shader part epilog
entry:
%a.neg = fneg float %a
%a.cvt = fptrunc float %a.neg to bfloat
@@ -217,14 +251,8 @@ define amdgpu_ps void @fptrunc_f32_to_bf16(float %a, ptr %out) {
; GFX-950: ; %bb.0: ; %entry
; GFX-950-NEXT: v_mov_b32_e32 v3, v2
; GFX-950-NEXT: v_mov_b32_e32 v2, v1
-; GFX-950-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX-950-NEXT: s_movk_i32 s0, 0x7fff
-; GFX-950-NEXT: v_add3_u32 v1, v1, v0, s0
-; GFX-950-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX-950-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX-950-NEXT: s_nop 1
-; GFX-950-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
-; GFX-950-NEXT: flat_store_short_d16_hi v[2:3], v0
+; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
entry:
%a.cvt = fptrunc float %a to bfloat
@@ -252,15 +280,8 @@ define amdgpu_ps void @fptrunc_f32_to_bf16_abs(float %a, ptr %out) {
; GFX-950: ; %bb.0: ; %entry
; GFX-950-NEXT: v_mov_b32_e32 v3, v2
; GFX-950-NEXT: v_mov_b32_e32 v2, v1
-; GFX-950-NEXT: v_and_b32_e32 v1, 0x7fffffff, v0
-; GFX-950-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX-950-NEXT: s_movk_i32 s0, 0x7fff
-; GFX-950-NEXT: v_add3_u32 v4, v4, v1, s0
-; GFX-950-NEXT: v_or_b32_e32 v1, 0x400000, v1
-; GFX-950-NEXT: v_cmp_u_f32_e64 vcc, |v0|, |v0|
-; GFX-950-NEXT: s_nop 1
-; GFX-950-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX-950-NEXT: flat_store_short_d16_hi v[2:3], v0
+; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, |v0|, s0
+; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
entry:
%a.abs = call float @llvm.fabs.f32(float %a)
@@ -289,15 +310,8 @@ define amdgpu_ps void @fptrunc_f32_to_bf16_neg(float %a, ptr %out) {
; GFX-950: ; %bb.0: ; %entry
; GFX-950-NEXT: v_mov_b32_e32 v3, v2
; GFX-950-NEXT: v_mov_b32_e32 v2, v1
-; GFX-950-NEXT: v_xor_b32_e32 v1, 0x80000000, v0
-; GFX-950-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX-950-NEXT: s_movk_i32 s0, 0x7fff
-; GFX-950-NEXT: v_add3_u32 v4, v4, v1, s0
-; GFX-950-NEXT: v_or_b32_e32 v1, 0x400000, v1
-; GFX-950-NEXT: v_cmp_u_f32_e64 vcc, -v0, -v0
-; GFX-950-NEXT: s_nop 1
-; GFX-950-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX-950-NEXT: flat_store_short_d16_hi v[2:3], v0
+; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, -v0, s0
+; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
entry:
%a.neg = fneg float %a
@@ -333,26 +347,9 @@ define amdgpu_ps void @fptrunc_f64_to_bf16(double %a, ptr %out) {
;
; GFX-950-LABEL: fptrunc_f64_to_bf16:
; GFX-950: ; %bb.0: ; %entry
-; GFX-950-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
-; GFX-950-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
-; GFX-950-NEXT: v_and_b32_e32 v7, 1, v6
-; GFX-950-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
-; GFX-950-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
-; GFX-950-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
-; GFX-950-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
-; GFX-950-NEXT: v_add_u32_e32 v4, v6, v4
-; GFX-950-NEXT: s_or_b64 vcc, s[0:1], vcc
-; GFX-950-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
-; GFX-950-NEXT: s_brev_b32 s0, 1
-; GFX-950-NEXT: v_and_or_b32 v5, v1, s0, v4
-; GFX-950-NEXT: v_bfe_u32 v4, v4, 16, 1
-; GFX-950-NEXT: s_movk_i32 s0, 0x7fff
-; GFX-950-NEXT: v_add3_u32 v4, v4, v5, s0
-; GFX-950-NEXT: v_or_b32_e32 v5, 0x400000, v5
-; GFX-950-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
-; GFX-950-NEXT: s_nop 1
-; GFX-950-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
-; GFX-950-NEXT: flat_store_short_d16_hi v[2:3], v0
+; GFX-950-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
entry:
%a.cvt = fptrunc double %a to bfloat
@@ -388,27 +385,9 @@ define amdgpu_ps void @fptrunc_f64_to_bf16_neg(double %a, ptr %out) {
;
; GFX-950-LABEL: fptrunc_f64_to_bf16_neg:
; GFX-950: ; %bb.0: ; %entry
-; GFX-950-NEXT: v_cvt_f32_f64_e64 v7, |v[0:1]|
-; GFX-950-NEXT: v_cvt_f64_f32_e32 v[4:5], v7
-; GFX-950-NEXT: v_and_b32_e32 v8, 1, v7
-; GFX-950-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
-; GFX-950-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
-; GFX-950-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8
-; GFX-950-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
-; GFX-950-NEXT: v_add_u32_e32 v4, v7, v4
-; GFX-950-NEXT: s_or_b64 vcc, s[0:1], vcc
-; GFX-950-NEXT: s_brev_b32 s4, 1
-; GFX-950-NEXT: v_xor_b32_e32 v6, 0x80000000, v1
-; GFX-950-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
-; GFX-950-NEXT: v_and_or_b32 v5, v6, s4, v4
-; GFX-950-NEXT: v_bfe_u32 v4, v4, 16, 1
-; GFX-950-NEXT: s_movk_i32 s0, 0x7fff
-; GFX-950-NEXT: v_add3_u32 v4, v4, v5, s0
-; GFX-950-NEXT: v_or_b32_e32 v5, 0x400000, v5
-; GFX-950-NEXT: v_cmp_u_f64_e64 vcc, -v[0:1], -v[0:1]
-; GFX-950-NEXT: s_nop 1
-; GFX-950-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
-; GFX-950-NEXT: flat_store_short_d16_hi v[2:3], v0
+; GFX-950-NEXT: v_cvt_f32_f64_e64 v0, -v[0:1]
+; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
entry:
%a.neg = fneg double %a
@@ -445,27 +424,9 @@ define amdgpu_ps void @fptrunc_f64_to_bf16_abs(double %a, ptr %out) {
;
; GFX-950-LABEL: fptrunc_f64_to_bf16_abs:
; GFX-950: ; %bb.0: ; %entry
-; GFX-950-NEXT: v_cvt_f32_f64_e64 v7, |v[0:1]|
-; GFX-950-NEXT: v_cvt_f64_f32_e32 v[4:5], v7
-; GFX-950-NEXT: v_and_b32_e32 v8, 1, v7
-; GFX-950-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
-; GFX-950-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
-; GFX-950-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8
-; GFX-950-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
-; GFX-950-NEXT: v_add_u32_e32 v4, v7, v4
-; GFX-950-NEXT: s_or_b64 vcc, s[0:1], vcc
-; GFX-950-NEXT: v_and_b32_e32 v6, 0x7fffffff, v1
-; GFX-950-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
-; GFX-950-NEXT: s_brev_b32 s0, 1
-; GFX-950-NEXT: v_and_or_b32 v5, v6, s0, v4
-; GFX-950-NEXT: v_bfe_u32 v4, v4, 16, 1
-; GFX-950-NEXT: s_movk_i32 s0, 0x7fff
-; GFX-950-NEXT: v_add3_u32 v4, v4, v5, s0
-; GFX-950-NEXT: v_or_b32_e32 v5, 0x400000, v5
-; GFX-950-NEXT: v_cmp_u_f64_e64 vcc, |v[0:1]|, |v[0:1]|
-; GFX-950-NEXT: s_nop 1
-; GFX-950-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
-; GFX-950-NEXT: flat_store_short_d16_hi v[2:3], v0
+; GFX-950-NEXT: v_cvt_f32_f64_e64 v0, |v[0:1]|
+; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
entry:
%a.abs = call double @llvm.fabs.f64(double %a)
diff --git a/llvm/test/MC/AMDGPU/gfx950_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx950_asm_vop3.s
new file mode 100644
index 00000000000000..c9980f420b9552
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx950_asm_vop3.s
@@ -0,0 +1,26 @@
+// RUN: llvm-mc -arch=amdgcn -mcpu=gfx950 -show-encoding %s | FileCheck --check-prefix=GFX950 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx940 %s 2>&1 | FileCheck -check-prefix=GFX940-ERR --strict-whitespace %s
+
+v_cvt_pk_bf16_f32 v5, v1, v2
+// GFX950: v_cvt_pk_bf16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x68,0xd2,0x01,0x05,0x02,0x00]
+// GFX940-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_cvt_pk_bf16_f32 v5, v255, v255
+// GFX950: v_cvt_pk_bf16_f32 v5, v255, v255 ; encoding: [0x05,0x00,0x68,0xd2,0xff,0xff,0x03,0x00]
+// GFX940-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_cvt_pk_bf16_f32 v5, v1, s2
+// GFX950: v_cvt_pk_bf16_f32 v5, v1, s2 ; encoding: [0x05,0x00,0x68,0xd2,0x01,0x05,0x00,0x00]
+// GFX940-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_cvt_pk_bf16_f32 v5, m0, 0.5
+// GFX950: v_cvt_pk_bf16_f32 v5, m0, 0.5 ; encoding: [0x05,0x00,0x68,0xd2,0x7c,0xe0,0x01,0x00]
+// GFX940-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_cvt_pk_bf16_f32 v5, -1, exec_hi
+// GFX950: v_cvt_pk_bf16_f32 v5, -1, exec_hi ; encoding: [0x05,0x00,0x68,0xd2,0xc1,0xfe,0x00,0x00]
+// GFX940-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_cvt_pk_bf16_f32 v5, 0.5, m0 mul:2
+// GFX950: v_cvt_pk_bf16_f32 v5, 0.5, m0 mul:2 ; encoding: [0x05,0x00,0x68,0xd2,0xf0,0xf8,0x00,0x08]
+// GFX940-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx950_dasm_vop3.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx950_dasm_vop3.txt
new file mode 100644
index 00000000000000..909743c2babf59
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx950_dasm_vop3.txt
@@ -0,0 +1,19 @@
+# RUN: llvm-mc -arch=amdgcn -mcpu=gfx950 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX950 %s
+
+# GFX950: v_cvt_pk_bf16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x68,0xd2,0x01,0x05,0x02,0x00]
+0x05,0x00,0x68,0xd2,0x01,0x05,0x02,0x00
+
+# GFX950: v_cvt_pk_bf16_f32 v5, v255, v255 ; encoding: [0x05,0x00,0x68,0xd2,0xff,0xff,0x03,0x00]
+0x05,0x00,0x68,0xd2,0xff,0xff,0x03,0x00
+
+# GFX950: v_cvt_pk_bf16_f32 v5, v1, s2 ; encoding: [0x05,0x00,0x68,0xd2,0x01,0x05,0x00,0x00]
+0x05,0x00,0x68,0xd2,0x01,0x05,0x00,0x00
+
+# GFX950: v_cvt_pk_bf16_f32 v5, m0, 0.5 ; encoding: [0x05,0x00,0x68,0xd2,0x7c,0xe0,0x01,0x00]
+0x05,0x00,0x68,0xd2,0x7c,0xe0,0x01,0x00
+
+# GFX950: v_cvt_pk_bf16_f32 v5, -1, exec_hi ; encoding: [0x05,0x00,0x68,0xd2,0xc1,0xfe,0x00,0x00]
+0x05,0x00,0x68,0xd2,0xc1,0xfe,0x00,0x00
+
+# GFX950: v_cvt_pk_bf16_f32 v5, 0.5, m0 mul:2 ; encoding: [0x05,0x00,0x68,0xd2,0xf0,0xf8,0x00,0x08]
+0x05,0x00,0x68,0xd2,0xf0,0xf8,0x00,0x08
>From 130a3150ec9cdaecdf9b0fa773b8c23a6b9bc527 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Mon, 18 Nov 2024 21:53:56 -0800
Subject: [PATCH 08/22] AMDGPU: Define v_mfma_f32_32x32x16_bf16 for gfx950
(#116679)
Unlike the existing gfx940 intrinsics, which use short/i16 in place of
bfloat, this one uses the natural bfloat type.
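
A minimal usage sketch (illustrative, not part of the diff), assuming a
gfx950 target with the gfx950-insts feature:

declare <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat>, <8 x bfloat>, <16 x float>, i32 immarg, i32 immarg, i32 immarg)

define <16 x float> @mfma_bf16(<8 x bfloat> %a, <8 x bfloat> %b, <16 x float> %acc) {
  ; The three trailing immediates are cbsz/abid/blgp; 0 is the plain form,
  ; and the tests below also exercise nonzero flag combinations.
  %r = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %a, <8 x bfloat> %b, <16 x float> %acc, i32 0, i32 0, i32 0)
  ret <16 x float> %r
}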
---
clang/include/clang/Basic/BuiltinsAMDGPU.def | 2 +
.../CodeGenOpenCL/builtins-amdgcn-mfma.cl | 6 +
.../builtins-amdgcn-error-gfx950-param.cl | 7 +
.../builtins-amdgcn-error-gfx950.cl | 5 +-
llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 2 +
llvm/lib/Target/AMDGPU/SIInstrInfo.td | 1 +
llvm/lib/Target/AMDGPU/VOP3PInstructions.td | 6 +
.../UniformityAnalysis/AMDGPU/intrinsics.ll | 8 +
.../AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll | 474 ++++++++++++++++++
llvm/test/MC/AMDGPU/mai-gfx950.s | 56 ++-
.../MC/Disassembler/AMDGPU/gfx950_mai.txt | 27 +
llvm/test/tools/llvm-mca/AMDGPU/gfx950.s | 10 +-
12 files changed, 596 insertions(+), 8 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index 6917d8d1aca69d..7ce8f2c1669d67 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -437,6 +437,8 @@ TARGET_BUILTIN(__builtin_amdgcn_cvt_sr_fp8_f32, "ifiiIi", "nc", "fp8-conversion-
TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x32_f16, "V4fV8hV8hV4fIiIiIi", "nc", "gfx950-insts")
TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x16_f16, "V16fV8hV8hV16fIiIiIi", "nc", "gfx950-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x16_bf16, "V16fV8yV8yV16fIiIiIi", "nc", "gfx950-insts")
+
//===----------------------------------------------------------------------===//
// GFX12+ only builtins.
//===----------------------------------------------------------------------===//
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-mfma.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-mfma.cl
index a644a60f9ec381..841d8fcad0fee0 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-mfma.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-mfma.cl
@@ -24,6 +24,7 @@ typedef short v8s __attribute__((ext_vector_type(8)));
typedef short v16s __attribute__((ext_vector_type(16)));
typedef short v32s __attribute__((ext_vector_type(32)));
typedef double v4d __attribute__((ext_vector_type(4)));
+typedef __bf16 v8bf16 __attribute__((ext_vector_type(8)));
#ifdef MFMA_GFX908_TESTS
@@ -424,5 +425,10 @@ v16f test_mfma_f32_32x32x16_f16(v8h a, v8h b, v16f c)
return __builtin_amdgcn_mfma_f32_32x32x16_f16(a, b, c, 1, 2, 3);
}
+// CHECK-GFX950-LABEL: @test_mfma_f32_32x32x16_bf16(
+// CHECK-GFX950: tail call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %a, <8 x bfloat> %b, <16 x float> %c, i32 1, i32 2, i32 3)
+v16f test_mfma_f32_32x32x16_bf16(v8bf16 a, v8bf16 b, v16f c) {
+ return __builtin_amdgcn_mfma_f32_32x32x16_bf16(a, b, c, 1, 2, 3);
+}
#endif
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950-param.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950-param.cl
index 4c267e2cac5cad..4af67763c40dd2 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950-param.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950-param.cl
@@ -4,6 +4,7 @@
typedef float float4 __attribute__((ext_vector_type(4)));
typedef float float16 __attribute__((ext_vector_type(16)));
typedef half half8 __attribute__((ext_vector_type(8)));
+typedef __bf16 bfloat8 __attribute__((ext_vector_type(8)));
void test_mfma_f32_16x16x32_f16(__global float4* out, half8 a, half8 b, float4 c, int X) {
@@ -19,3 +20,9 @@ void test_mfma_f32_32x32x16_f16(__global float16* out, half8 a, half8 b, float16
*out = __builtin_amdgcn_mfma_f32_32x32x16_f16(a, b, c, 0, X, 0); // expected-error{{argument to '__builtin_amdgcn_mfma_f32_32x32x16_f16' must be a constant integer}}
*out = __builtin_amdgcn_mfma_f32_32x32x16_f16(a, b, c, 0, 0, X); // expected-error{{argument to '__builtin_amdgcn_mfma_f32_32x32x16_f16' must be a constant integer}}
}
+
+void test_mfma_f32_32x32x16_bf16(__global float16* out, bfloat8 a, bfloat8 b, float16 c, int X) {
+ *out = __builtin_amdgcn_mfma_f32_32x32x16_bf16(a, b, c, X, 0, 0); // expected-error{{argument to '__builtin_amdgcn_mfma_f32_32x32x16_bf16' must be a constant integer}}
+ *out = __builtin_amdgcn_mfma_f32_32x32x16_bf16(a, b, c, 0, X, 0); // expected-error{{argument to '__builtin_amdgcn_mfma_f32_32x32x16_bf16' must be a constant integer}}
+ *out = __builtin_amdgcn_mfma_f32_32x32x16_bf16(a, b, c, 0, 0, X); // expected-error{{argument to '__builtin_amdgcn_mfma_f32_32x32x16_bf16' must be a constant integer}}
+}
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950.cl
index 0b3a8e78e1c795..e0fd2aa5c58a02 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950.cl
@@ -4,9 +4,12 @@
typedef float float4 __attribute__((ext_vector_type(4)));
typedef float float16 __attribute__((ext_vector_type(16)));
typedef half half8 __attribute__((ext_vector_type(8)));
+typedef __bf16 bfloat8 __attribute__((ext_vector_type(8)));
void test(__global float4* out0, half8 a0, half8 b0, float4 c0,
- __global float16* out1, half8 a1, half8 b1, float16 c1) {
+ __global float16* out1, half8 a1, half8 b1, float16 c1,
+ __global float16* out2, bfloat8 a2, bfloat8 b2, float16 c2) {
*out0 = __builtin_amdgcn_mfma_f32_16x16x32_f16(a0, b0, c0, 0, 0, 0); // expected-error{{'__builtin_amdgcn_mfma_f32_16x16x32_f16' needs target feature gfx950-insts}}
*out1 = __builtin_amdgcn_mfma_f32_32x32x16_f16(a1, b1, c1, 0, 0, 0); // expected-error{{'__builtin_amdgcn_mfma_f32_32x32x16_f16' needs target feature gfx950-insts}}
+ *out2 = __builtin_amdgcn_mfma_f32_32x32x16_bf16(a2, b2, c2, 0, 0, 0); // expected-error{{'__builtin_amdgcn_mfma_f32_32x32x16_bf16' needs target feature gfx950-insts}}
}
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index ec1234e7bc7d94..15f33cdbf92e6e 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -3117,6 +3117,8 @@ def int_amdgcn_cvt_sr_fp8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_sr_fp8_f32">,
defset list<Intrinsic> AMDGPUMFMAIntrinsics950 = {
def int_amdgcn_mfma_f32_16x16x32_f16 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty, llvm_v8f16_ty>;
def int_amdgcn_mfma_f32_32x32x16_f16 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v8f16_ty>;
+
+def int_amdgcn_mfma_f32_32x32x16_bf16 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v8bf16_ty>;
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 7df9be5c6f7a0b..2079b34d0448f4 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -2848,6 +2848,7 @@ def VOP_V16F32_V2I32_V4I32_I32 : VOPProfile <[v16f32, v2i32, v4i32, i32]>;
def VOP_V4F32_V8F16_V8F16_V4F32 : VOPProfile <[v4f32, v8f16, v8f16, v4f32]>;
def VOP_V16F32_V8F16_V8F16_V16F32 : VOPProfile <[v16f32, v8f16, v8f16, v16f32]>;
+def VOP_V16F32_V8BF16_V8BF16_V16F32 : VOPProfile <[v16f32, v8bf16, v8bf16, v16f32]>;
class Commutable_REV <string revOp, bit isOrig> {
diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index 58e26a96ece202..08882e41d863a1 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -631,6 +631,9 @@ def VOPProfileMAI_F32_V8F16_X32_VCD : VOPProfileMAI<VOP_V4F32_V8F16_V8F16_V4F32,
def VOPProfileMAI_F32_V8F16_X16 : VOPProfileMAI<VOP_V16F32_V8F16_V8F16_V16F32, AISrc_512_f32, ADst_512, AVSrc_128>;
def VOPProfileMAI_F32_V8F16_X16_VCD : VOPProfileMAI<VOP_V16F32_V8F16_V8F16_V16F32, VISrc_512_f32, VDst_512, AVSrc_128>;
+def VOPProfileMAI_F32_V8BF16_X16 : VOPProfileMAI<VOP_V16F32_V8BF16_V8BF16_V16F32, AISrc_512_f32, ADst_512, AVSrc_128>;
+def VOPProfileMAI_F32_V8BF16_X16_VCD : VOPProfileMAI<VOP_V16F32_V8BF16_V8BF16_V16F32, VISrc_512_f32, VDst_512, AVSrc_128>;
+
class MFMATable <bit is_mac, string Name> {
bit IsMac = is_mac;
string FMAOp = Name;
@@ -747,6 +750,7 @@ defm V_MFMA_F32_32X32X4BF16 : MAIInst<"v_mfma_f32_32x32x4bf16", "F32_V2I16_X16",
let SubtargetPredicate = HasGFX950Insts, is_gfx940_xdl = 1 in {
defm V_MFMA_F32_16X16X32_F16 : MAIInst<"v_mfma_f32_16x16x32f16", "F32_V8F16_X32", int_amdgcn_mfma_f32_16x16x32_f16>;
defm V_MFMA_F32_32X32X16_F16 : MAIInst<"v_mfma_f32_32x32x16f16", "F32_V8F16_X16", int_amdgcn_mfma_f32_32x32x16_f16>;
+defm V_MFMA_F32_32X32X16_BF16 : MAIInst<"v_mfma_f32_32x32x16bf16", "F32_V8BF16_X16", int_amdgcn_mfma_f32_32x32x16_bf16>;
}
let Predicates = [isGFX90APlus] in {
@@ -1786,6 +1790,8 @@ defm V_MFMA_F64_4X4X4F64 : VOP3P_Real_MFMA_gfx90a <0x6f>;
defm V_MFMA_F32_16X16X32_F16 : VOP3P_Real_MFMA_gfx950 <0x54, "v_mfma_f32_16x16x32_f16">;
defm V_MFMA_F32_32X32X16_F16 : VOP3P_Real_MFMA_gfx950 <0x55, "v_mfma_f32_32x32x16_f16">;
+defm V_MFMA_F32_32X32X16_BF16 : VOP3P_Real_MFMA_gfx950 <0x37, "v_mfma_f32_32x32x16_bf16">;
+
defm V_MFMA_I32_32X32X16I8 : VOP3P_Real_MFMA_gfx940 <0x56, "v_mfma_i32_32x32x16_i8">;
defm V_MFMA_I32_16X16X32I8 : VOP3P_Real_MFMA_gfx940 <0x57, "v_mfma_i32_16x16x32_i8">;
let SubtargetPredicate = HasXF32Insts in {
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
index c457d867af361e..00a3aaf77f9003 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
@@ -278,6 +278,14 @@ define amdgpu_kernel void @mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x half> %a
ret void
}
+; CHECK: DIVERGENT: %result = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, i32 immarg 0, i32 immarg 0, i32 immarg 0)
+define amdgpu_kernel void @mfma_f32_32x32x16_bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, ptr addrspace(1) %out) {
+ %result = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, i32 immarg 0, i32 immarg 0, i32 immarg 0)
+ store <16 x float> %result, ptr addrspace(1) %out
+ ret void
+}
+
+
declare i32 @llvm.amdgcn.ds.swizzle(i32, i32) #1
declare i32 @llvm.amdgcn.permlane16.i32(i32, i32, i32, i32, i1, i1) #1
declare i32 @llvm.amdgcn.permlanex16.i32(i32, i32, i32, i32, i1, i1) #1
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll
new file mode 100644
index 00000000000000..2da602713d72c4
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll
@@ -0,0 +1,474 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -march=amdgcn -mcpu=gfx950 < %s | FileCheck -enable-var-scope --check-prefix=GCN %s
+
+; FIXME: bfloat vector arguments are broken in globalisel.
+; https://github.com/llvm/llvm-project/issues/77055
+
+declare <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat>, <8 x bfloat>, <16 x float>, i32 immarg, i32 immarg, i32 immarg)
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.mfma.f32.32x32x16.bf16
+; --------------------------------------------------------------------
+
+define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2) #1 {
+; GCN-LABEL: test_mfma_f32_32x32x16_bf16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
+; GCN-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
+; GCN-NEXT: v_mov_b64_e32 v[12:13], 48
+; GCN-NEXT: v_mov_b64_e32 v[14:15], 32
+; GCN-NEXT: v_mov_b64_e32 v[16:17], 16
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
+; GCN-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
+; GCN-NEXT: v_mov_b64_e32 v[4:5], s[28:29]
+; GCN-NEXT: v_accvgpr_write_b32 a0, s8
+; GCN-NEXT: v_mov_b64_e32 v[6:7], s[30:31]
+; GCN-NEXT: v_accvgpr_write_b32 a1, s9
+; GCN-NEXT: v_accvgpr_write_b32 a2, s10
+; GCN-NEXT: v_accvgpr_write_b32 a3, s11
+; GCN-NEXT: v_accvgpr_write_b32 a4, s12
+; GCN-NEXT: v_accvgpr_write_b32 a5, s13
+; GCN-NEXT: v_accvgpr_write_b32 a6, s14
+; GCN-NEXT: v_accvgpr_write_b32 a7, s15
+; GCN-NEXT: v_accvgpr_write_b32 a8, s16
+; GCN-NEXT: v_accvgpr_write_b32 a9, s17
+; GCN-NEXT: v_accvgpr_write_b32 a10, s18
+; GCN-NEXT: v_accvgpr_write_b32 a11, s19
+; GCN-NEXT: v_accvgpr_write_b32 a12, s20
+; GCN-NEXT: v_accvgpr_write_b32 a13, s21
+; GCN-NEXT: v_accvgpr_write_b32 a14, s22
+; GCN-NEXT: v_accvgpr_write_b32 a15, s23
+; GCN-NEXT: v_mov_b64_e32 v[18:19], 0
+; GCN-NEXT: v_mov_b32_e32 v8, s16
+; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[16:31], v[0:3], v[4:7], a[0:15]
+; GCN-NEXT: v_mov_b32_e32 v0, s20
+; GCN-NEXT: v_mov_b32_e32 v1, s21
+; GCN-NEXT: v_mov_b32_e32 v2, s22
+; GCN-NEXT: v_mov_b32_e32 v3, s23
+; GCN-NEXT: v_mov_b32_e32 v9, s17
+; GCN-NEXT: v_mov_b32_e32 v10, s18
+; GCN-NEXT: v_mov_b32_e32 v11, s19
+; GCN-NEXT: s_nop 3
+; GCN-NEXT: global_store_dwordx4 v[12:13], a[28:31], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v[14:15], a[24:27], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v[16:17], a[20:23], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v[18:19], a[16:19], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v[14:15], v[8:11], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v[12:13], v[0:3], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_mov_b32_e32 v0, s8
+; GCN-NEXT: v_mov_b32_e32 v1, s9
+; GCN-NEXT: v_mov_b32_e32 v2, s10
+; GCN-NEXT: v_mov_b32_e32 v3, s11
+; GCN-NEXT: global_store_dwordx4 v[18:19], v[0:3], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_mov_b32_e32 v0, s12
+; GCN-NEXT: v_mov_b32_e32 v1, s13
+; GCN-NEXT: v_mov_b32_e32 v2, s14
+; GCN-NEXT: v_mov_b32_e32 v3, s15
+; GCN-NEXT: global_store_dwordx4 v[16:17], v[0:3], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_endpgm
+ %result = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, i32 0, i32 0, i32 0)
+ store volatile <16 x float> %result, ptr addrspace(1) null
+ store volatile <16 x float> %arg2, ptr addrspace(1) null
+ ret void
+}
+
+define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16__flags(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2) #1 {
+; GCN-LABEL: test_mfma_f32_32x32x16_bf16__flags:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
+; GCN-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
+; GCN-NEXT: v_mov_b64_e32 v[12:13], 48
+; GCN-NEXT: v_mov_b64_e32 v[14:15], 32
+; GCN-NEXT: v_mov_b64_e32 v[16:17], 16
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
+; GCN-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
+; GCN-NEXT: v_mov_b64_e32 v[4:5], s[28:29]
+; GCN-NEXT: v_accvgpr_write_b32 a0, s8
+; GCN-NEXT: v_mov_b64_e32 v[6:7], s[30:31]
+; GCN-NEXT: v_accvgpr_write_b32 a1, s9
+; GCN-NEXT: v_accvgpr_write_b32 a2, s10
+; GCN-NEXT: v_accvgpr_write_b32 a3, s11
+; GCN-NEXT: v_accvgpr_write_b32 a4, s12
+; GCN-NEXT: v_accvgpr_write_b32 a5, s13
+; GCN-NEXT: v_accvgpr_write_b32 a6, s14
+; GCN-NEXT: v_accvgpr_write_b32 a7, s15
+; GCN-NEXT: v_accvgpr_write_b32 a8, s16
+; GCN-NEXT: v_accvgpr_write_b32 a9, s17
+; GCN-NEXT: v_accvgpr_write_b32 a10, s18
+; GCN-NEXT: v_accvgpr_write_b32 a11, s19
+; GCN-NEXT: v_accvgpr_write_b32 a12, s20
+; GCN-NEXT: v_accvgpr_write_b32 a13, s21
+; GCN-NEXT: v_accvgpr_write_b32 a14, s22
+; GCN-NEXT: v_accvgpr_write_b32 a15, s23
+; GCN-NEXT: v_mov_b64_e32 v[18:19], 0
+; GCN-NEXT: v_mov_b32_e32 v8, s16
+; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1
+; GCN-NEXT: v_mov_b32_e32 v0, s20
+; GCN-NEXT: v_mov_b32_e32 v1, s21
+; GCN-NEXT: v_mov_b32_e32 v2, s22
+; GCN-NEXT: v_mov_b32_e32 v3, s23
+; GCN-NEXT: v_mov_b32_e32 v9, s17
+; GCN-NEXT: v_mov_b32_e32 v10, s18
+; GCN-NEXT: v_mov_b32_e32 v11, s19
+; GCN-NEXT: s_nop 3
+; GCN-NEXT: global_store_dwordx4 v[12:13], a[28:31], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v[14:15], a[24:27], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v[16:17], a[20:23], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v[18:19], a[16:19], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v[14:15], v[8:11], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v[12:13], v[0:3], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_mov_b32_e32 v0, s8
+; GCN-NEXT: v_mov_b32_e32 v1, s9
+; GCN-NEXT: v_mov_b32_e32 v2, s10
+; GCN-NEXT: v_mov_b32_e32 v3, s11
+; GCN-NEXT: global_store_dwordx4 v[18:19], v[0:3], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_mov_b32_e32 v0, s12
+; GCN-NEXT: v_mov_b32_e32 v1, s13
+; GCN-NEXT: v_mov_b32_e32 v2, s14
+; GCN-NEXT: v_mov_b32_e32 v3, s15
+; GCN-NEXT: global_store_dwordx4 v[16:17], v[0:3], off sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_endpgm
+ %result = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, i32 2, i32 3, i32 1)
+ store volatile <16 x float> %result, ptr addrspace(1) null
+ store volatile <16 x float> %arg2, ptr addrspace(1) null
+ ret void
+}
+
+define <16 x float> @test_mfma_f32_32x32x16_bf16__mac(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2) {
+; GCN-LABEL: test_mfma_f32_32x32x16_bf16__mac:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_accvgpr_write_b32 a0, v8
+; GCN-NEXT: v_accvgpr_write_b32 a1, v9
+; GCN-NEXT: v_accvgpr_write_b32 a2, v10
+; GCN-NEXT: v_accvgpr_write_b32 a3, v11
+; GCN-NEXT: v_accvgpr_write_b32 a4, v12
+; GCN-NEXT: v_accvgpr_write_b32 a5, v13
+; GCN-NEXT: v_accvgpr_write_b32 a6, v14
+; GCN-NEXT: v_accvgpr_write_b32 a7, v15
+; GCN-NEXT: v_accvgpr_write_b32 a8, v16
+; GCN-NEXT: v_accvgpr_write_b32 a9, v17
+; GCN-NEXT: v_accvgpr_write_b32 a10, v18
+; GCN-NEXT: v_accvgpr_write_b32 a11, v19
+; GCN-NEXT: v_accvgpr_write_b32 a12, v20
+; GCN-NEXT: v_accvgpr_write_b32 a13, v21
+; GCN-NEXT: v_accvgpr_write_b32 a14, v22
+; GCN-NEXT: v_accvgpr_write_b32 a15, v23
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[0:15], v[0:3], v[4:7], a[0:15]
+; GCN-NEXT: s_nop 7
+; GCN-NEXT: s_nop 2
+; GCN-NEXT: v_accvgpr_read_b32 v0, a0
+; GCN-NEXT: v_accvgpr_read_b32 v1, a1
+; GCN-NEXT: v_accvgpr_read_b32 v2, a2
+; GCN-NEXT: v_accvgpr_read_b32 v3, a3
+; GCN-NEXT: v_accvgpr_read_b32 v4, a4
+; GCN-NEXT: v_accvgpr_read_b32 v5, a5
+; GCN-NEXT: v_accvgpr_read_b32 v6, a6
+; GCN-NEXT: v_accvgpr_read_b32 v7, a7
+; GCN-NEXT: v_accvgpr_read_b32 v8, a8
+; GCN-NEXT: v_accvgpr_read_b32 v9, a9
+; GCN-NEXT: v_accvgpr_read_b32 v10, a10
+; GCN-NEXT: v_accvgpr_read_b32 v11, a11
+; GCN-NEXT: v_accvgpr_read_b32 v12, a12
+; GCN-NEXT: v_accvgpr_read_b32 v13, a13
+; GCN-NEXT: v_accvgpr_read_b32 v14, a14
+; GCN-NEXT: v_accvgpr_read_b32 v15, a15
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %result = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, i32 0, i32 0, i32 0)
+ ret <16 x float> %result
+}
+
+define <16 x float> @test_mfma_f32_32x32x16_bf16__mac__flags(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2) {
+; GCN-LABEL: test_mfma_f32_32x32x16_bf16__mac__flags:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_accvgpr_write_b32 a0, v8
+; GCN-NEXT: v_accvgpr_write_b32 a1, v9
+; GCN-NEXT: v_accvgpr_write_b32 a2, v10
+; GCN-NEXT: v_accvgpr_write_b32 a3, v11
+; GCN-NEXT: v_accvgpr_write_b32 a4, v12
+; GCN-NEXT: v_accvgpr_write_b32 a5, v13
+; GCN-NEXT: v_accvgpr_write_b32 a6, v14
+; GCN-NEXT: v_accvgpr_write_b32 a7, v15
+; GCN-NEXT: v_accvgpr_write_b32 a8, v16
+; GCN-NEXT: v_accvgpr_write_b32 a9, v17
+; GCN-NEXT: v_accvgpr_write_b32 a10, v18
+; GCN-NEXT: v_accvgpr_write_b32 a11, v19
+; GCN-NEXT: v_accvgpr_write_b32 a12, v20
+; GCN-NEXT: v_accvgpr_write_b32 a13, v21
+; GCN-NEXT: v_accvgpr_write_b32 a14, v22
+; GCN-NEXT: v_accvgpr_write_b32 a15, v23
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[0:15], v[0:3], v[4:7], a[0:15] cbsz:1 abid:1 blgp:1
+; GCN-NEXT: s_nop 7
+; GCN-NEXT: s_nop 2
+; GCN-NEXT: v_accvgpr_read_b32 v0, a0
+; GCN-NEXT: v_accvgpr_read_b32 v1, a1
+; GCN-NEXT: v_accvgpr_read_b32 v2, a2
+; GCN-NEXT: v_accvgpr_read_b32 v3, a3
+; GCN-NEXT: v_accvgpr_read_b32 v4, a4
+; GCN-NEXT: v_accvgpr_read_b32 v5, a5
+; GCN-NEXT: v_accvgpr_read_b32 v6, a6
+; GCN-NEXT: v_accvgpr_read_b32 v7, a7
+; GCN-NEXT: v_accvgpr_read_b32 v8, a8
+; GCN-NEXT: v_accvgpr_read_b32 v9, a9
+; GCN-NEXT: v_accvgpr_read_b32 v10, a10
+; GCN-NEXT: v_accvgpr_read_b32 v11, a11
+; GCN-NEXT: v_accvgpr_read_b32 v12, a12
+; GCN-NEXT: v_accvgpr_read_b32 v13, a13
+; GCN-NEXT: v_accvgpr_read_b32 v14, a14
+; GCN-NEXT: v_accvgpr_read_b32 v15, a15
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %result = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, i32 1, i32 1, i32 1)
+ ret <16 x float> %result
+}
+
+define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16__vgprcd(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, ptr addrspace(1) %out) #0 {
+; GCN-LABEL: test_mfma_f32_32x32x16_bf16__vgprcd:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
+; GCN-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xa4
+; GCN-NEXT: v_mov_b32_e32 v12, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
+; GCN-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
+; GCN-NEXT: v_mov_b64_e32 v[4:5], s[28:29]
+; GCN-NEXT: v_accvgpr_write_b32 a31, s23
+; GCN-NEXT: v_mov_b64_e32 v[6:7], s[30:31]
+; GCN-NEXT: v_accvgpr_write_b32 a30, s22
+; GCN-NEXT: v_accvgpr_write_b32 a29, s21
+; GCN-NEXT: v_accvgpr_write_b32 a28, s20
+; GCN-NEXT: v_accvgpr_write_b32 a27, s19
+; GCN-NEXT: v_accvgpr_write_b32 a26, s18
+; GCN-NEXT: v_accvgpr_write_b32 a25, s17
+; GCN-NEXT: v_accvgpr_write_b32 a24, s16
+; GCN-NEXT: v_accvgpr_write_b32 a23, s15
+; GCN-NEXT: v_accvgpr_write_b32 a22, s14
+; GCN-NEXT: v_accvgpr_write_b32 a21, s13
+; GCN-NEXT: v_accvgpr_write_b32 a20, s12
+; GCN-NEXT: v_accvgpr_write_b32 a19, s11
+; GCN-NEXT: v_accvgpr_write_b32 a18, s10
+; GCN-NEXT: v_accvgpr_write_b32 a17, s9
+; GCN-NEXT: v_accvgpr_write_b32 a16, s8
+; GCN-NEXT: v_mov_b32_e32 v8, s20
+; GCN-NEXT: v_mov_b32_e32 v9, s21
+; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[0:15], v[0:3], v[4:7], a[16:31]
+; GCN-NEXT: v_mov_b32_e32 v10, s22
+; GCN-NEXT: v_mov_b32_e32 v11, s23
+; GCN-NEXT: v_mov_b32_e32 v0, s16
+; GCN-NEXT: v_mov_b32_e32 v1, s17
+; GCN-NEXT: v_mov_b32_e32 v2, s18
+; GCN-NEXT: v_mov_b32_e32 v3, s19
+; GCN-NEXT: global_store_dwordx4 v12, v[8:11], s[0:1] offset:48 sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v12, v[0:3], s[0:1] offset:32 sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_mov_b32_e32 v0, s12
+; GCN-NEXT: v_mov_b32_e32 v1, s13
+; GCN-NEXT: v_mov_b32_e32 v2, s14
+; GCN-NEXT: v_mov_b32_e32 v3, s15
+; GCN-NEXT: global_store_dwordx4 v12, v[0:3], s[0:1] offset:16 sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_mov_b32_e32 v0, s8
+; GCN-NEXT: v_mov_b32_e32 v1, s9
+; GCN-NEXT: v_mov_b32_e32 v2, s10
+; GCN-NEXT: v_mov_b32_e32 v3, s11
+; GCN-NEXT: global_store_dwordx4 v12, v[0:3], s[0:1] sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v12, a[8:11], s[0:1] offset:32 sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v12, a[12:15], s[0:1] offset:48 sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v12, a[0:3], s[0:1] sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v12, a[4:7], s[0:1] offset:16 sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_endpgm
+ %result = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, i32 0, i32 0, i32 0)
+ store volatile <16 x float> %arg2, ptr addrspace(1) %out
+ store volatile <16 x float> %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16__vgprcd__flags(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, ptr addrspace(1) %out) #0 {
+; GCN-LABEL: test_mfma_f32_32x32x16_bf16__vgprcd__flags:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
+; GCN-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xa4
+; GCN-NEXT: v_mov_b32_e32 v12, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
+; GCN-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
+; GCN-NEXT: v_mov_b64_e32 v[4:5], s[28:29]
+; GCN-NEXT: v_accvgpr_write_b32 a31, s23
+; GCN-NEXT: v_mov_b64_e32 v[6:7], s[30:31]
+; GCN-NEXT: v_accvgpr_write_b32 a30, s22
+; GCN-NEXT: v_accvgpr_write_b32 a29, s21
+; GCN-NEXT: v_accvgpr_write_b32 a28, s20
+; GCN-NEXT: v_accvgpr_write_b32 a27, s19
+; GCN-NEXT: v_accvgpr_write_b32 a26, s18
+; GCN-NEXT: v_accvgpr_write_b32 a25, s17
+; GCN-NEXT: v_accvgpr_write_b32 a24, s16
+; GCN-NEXT: v_accvgpr_write_b32 a23, s15
+; GCN-NEXT: v_accvgpr_write_b32 a22, s14
+; GCN-NEXT: v_accvgpr_write_b32 a21, s13
+; GCN-NEXT: v_accvgpr_write_b32 a20, s12
+; GCN-NEXT: v_accvgpr_write_b32 a19, s11
+; GCN-NEXT: v_accvgpr_write_b32 a18, s10
+; GCN-NEXT: v_accvgpr_write_b32 a17, s9
+; GCN-NEXT: v_accvgpr_write_b32 a16, s8
+; GCN-NEXT: v_mov_b32_e32 v8, s20
+; GCN-NEXT: v_mov_b32_e32 v9, s21
+; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[0:15], v[0:3], v[4:7], a[16:31] cbsz:1 abid:2 blgp:3
+; GCN-NEXT: v_mov_b32_e32 v10, s22
+; GCN-NEXT: v_mov_b32_e32 v11, s23
+; GCN-NEXT: v_mov_b32_e32 v0, s16
+; GCN-NEXT: v_mov_b32_e32 v1, s17
+; GCN-NEXT: v_mov_b32_e32 v2, s18
+; GCN-NEXT: v_mov_b32_e32 v3, s19
+; GCN-NEXT: global_store_dwordx4 v12, v[8:11], s[0:1] offset:48 sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v12, v[0:3], s[0:1] offset:32 sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_mov_b32_e32 v0, s12
+; GCN-NEXT: v_mov_b32_e32 v1, s13
+; GCN-NEXT: v_mov_b32_e32 v2, s14
+; GCN-NEXT: v_mov_b32_e32 v3, s15
+; GCN-NEXT: global_store_dwordx4 v12, v[0:3], s[0:1] offset:16 sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_mov_b32_e32 v0, s8
+; GCN-NEXT: v_mov_b32_e32 v1, s9
+; GCN-NEXT: v_mov_b32_e32 v2, s10
+; GCN-NEXT: v_mov_b32_e32 v3, s11
+; GCN-NEXT: global_store_dwordx4 v12, v[0:3], s[0:1] sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v12, a[8:11], s[0:1] offset:32 sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v12, a[12:15], s[0:1] offset:48 sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v12, a[0:3], s[0:1] sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v12, a[4:7], s[0:1] offset:16 sc0 sc1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_endpgm
+ %result = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, i32 1, i32 2, i32 3)
+ store volatile <16 x float> %arg2, ptr addrspace(1) %out
+ store volatile <16 x float> %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16__vgprcd_mac(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, ptr addrspace(1) %out) #0 {
+; GCN-LABEL: test_mfma_f32_32x32x16_bf16__vgprcd_mac:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
+; GCN-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xa4
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
+; GCN-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
+; GCN-NEXT: v_mov_b64_e32 v[4:5], s[28:29]
+; GCN-NEXT: v_accvgpr_write_b32 a0, s8
+; GCN-NEXT: v_mov_b64_e32 v[6:7], s[30:31]
+; GCN-NEXT: v_accvgpr_write_b32 a1, s9
+; GCN-NEXT: v_accvgpr_write_b32 a2, s10
+; GCN-NEXT: v_accvgpr_write_b32 a3, s11
+; GCN-NEXT: v_accvgpr_write_b32 a4, s12
+; GCN-NEXT: v_accvgpr_write_b32 a5, s13
+; GCN-NEXT: v_accvgpr_write_b32 a6, s14
+; GCN-NEXT: v_accvgpr_write_b32 a7, s15
+; GCN-NEXT: v_accvgpr_write_b32 a8, s16
+; GCN-NEXT: v_accvgpr_write_b32 a9, s17
+; GCN-NEXT: v_accvgpr_write_b32 a10, s18
+; GCN-NEXT: v_accvgpr_write_b32 a11, s19
+; GCN-NEXT: v_accvgpr_write_b32 a12, s20
+; GCN-NEXT: v_accvgpr_write_b32 a13, s21
+; GCN-NEXT: v_accvgpr_write_b32 a14, s22
+; GCN-NEXT: v_accvgpr_write_b32 a15, s23
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[0:15], v[0:3], v[4:7], a[0:15]
+; GCN-NEXT: v_mov_b32_e32 v0, 0
+; GCN-NEXT: s_nop 7
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: global_store_dwordx4 v0, a[12:15], s[0:1] offset:48
+; GCN-NEXT: global_store_dwordx4 v0, a[8:11], s[0:1] offset:32
+; GCN-NEXT: global_store_dwordx4 v0, a[4:7], s[0:1] offset:16
+; GCN-NEXT: global_store_dwordx4 v0, a[0:3], s[0:1]
+; GCN-NEXT: s_endpgm
+ %result = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, i32 0, i32 0, i32 0)
+ store <16 x float> %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16__vgprcd_mac_flags(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, ptr addrspace(1) %out) #0 {
+; GCN-LABEL: test_mfma_f32_32x32x16_bf16__vgprcd_mac_flags:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
+; GCN-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xa4
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
+; GCN-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
+; GCN-NEXT: v_mov_b64_e32 v[4:5], s[28:29]
+; GCN-NEXT: v_accvgpr_write_b32 a0, s8
+; GCN-NEXT: v_mov_b64_e32 v[6:7], s[30:31]
+; GCN-NEXT: v_accvgpr_write_b32 a1, s9
+; GCN-NEXT: v_accvgpr_write_b32 a2, s10
+; GCN-NEXT: v_accvgpr_write_b32 a3, s11
+; GCN-NEXT: v_accvgpr_write_b32 a4, s12
+; GCN-NEXT: v_accvgpr_write_b32 a5, s13
+; GCN-NEXT: v_accvgpr_write_b32 a6, s14
+; GCN-NEXT: v_accvgpr_write_b32 a7, s15
+; GCN-NEXT: v_accvgpr_write_b32 a8, s16
+; GCN-NEXT: v_accvgpr_write_b32 a9, s17
+; GCN-NEXT: v_accvgpr_write_b32 a10, s18
+; GCN-NEXT: v_accvgpr_write_b32 a11, s19
+; GCN-NEXT: v_accvgpr_write_b32 a12, s20
+; GCN-NEXT: v_accvgpr_write_b32 a13, s21
+; GCN-NEXT: v_accvgpr_write_b32 a14, s22
+; GCN-NEXT: v_accvgpr_write_b32 a15, s23
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[0:15], v[0:3], v[4:7], a[0:15] cbsz:3 abid:2 blgp:1
+; GCN-NEXT: v_mov_b32_e32 v0, 0
+; GCN-NEXT: s_nop 7
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: global_store_dwordx4 v0, a[12:15], s[0:1] offset:48
+; GCN-NEXT: global_store_dwordx4 v0, a[8:11], s[0:1] offset:32
+; GCN-NEXT: global_store_dwordx4 v0, a[4:7], s[0:1] offset:16
+; GCN-NEXT: global_store_dwordx4 v0, a[0:3], s[0:1]
+; GCN-NEXT: s_endpgm
+ %result = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, i32 3, i32 2, i32 1)
+ store <16 x float> %result, ptr addrspace(1) %out
+ ret void
+}
+
+attributes #0 = { "amdgpu-flat-work-group-size"="512,512" }
+attributes #1 = { "amdgpu-flat-work-group-size"="1,64" }
diff --git a/llvm/test/MC/AMDGPU/mai-gfx950.s b/llvm/test/MC/AMDGPU/mai-gfx950.s
index deba548b6ae8e1..1d4902e293bb10 100644
--- a/llvm/test/MC/AMDGPU/mai-gfx950.s
+++ b/llvm/test/MC/AMDGPU/mai-gfx950.s
@@ -47,11 +47,11 @@ v_mfma_f32_16x16x32_f16 a[0:3], a[0:3], a[0:3], a[0:3] blgp:1
// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
v_mfma_f32_16x16x32_f16 a[0:3], a[0:3], a[0:3], a[0:3] cbsz:3
-// GFX950: v_mfma_f32_16x16x32_f16 a[0:3], a[0:3], a[0:3], a[0:3] abid:1 ; encoding: [0x00,0x88,0xd4,0xd3,0x00,0x01,0x02,0x1c]
+// GFX950: v_mfma_f32_16x16x32_f16 a[0:3], a[0:3], a[0:3], a[0:3] abid:1 ; encoding: [0x00,0x88,0xd4,0xd3,0x00,0x01,0x02,0x1c]
// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
v_mfma_f32_16x16x32_f16 a[0:3], a[0:3], a[0:3], a[0:3] abid:1
-// GFX950: v_mfma_f32_16x16x32_f16 a[0:3], a[0:3], a[0:3], a[0:3] cbsz:3 abid:1 ; encoding: [0x00,0x8b,0xd4,0xd3,0x00,0x01,0x02,0x1c]
+// GFX950: v_mfma_f32_16x16x32_f16 a[0:3], a[0:3], a[0:3], a[0:3] cbsz:3 abid:1 ; encoding: [0x00,0x8b,0xd4,0xd3,0x00,0x01,0x02,0x1c]
// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
v_mfma_f32_16x16x32_f16 a[0:3], a[0:3], a[0:3], a[0:3] cbsz:3 abid:1
@@ -75,7 +75,7 @@ v_mfma_f32_32x32x16_f16 v[0:15], v[0:3], v[0:3], v[0:15]
// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
v_mfma_f32_32x32x16_f16 a[0:15], a[0:3], a[0:3], a[0:15]
-// GFX950: v_mfma_f32_32x32x16_f16 v[0:15], v[0:3], v[0:3], v[0:15] ; encoding: [0x00,0x00,0xd5,0xd3,0x00,0x01,0x02,0x04]
+// GFX950: v_mfma_f32_32x32x16_f16 v[0:15], v[0:3], v[0:3], v[0:15] ; encoding: [0x00,0x00,0xd5,0xd3,0x00,0x01,0x02,0x04]
// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
v_mfma_f32_32x32x16f16 v[0:15], v[0:3], v[0:3], v[0:15]
@@ -91,7 +91,7 @@ v_mfma_f32_32x32x16_f16 v[0:15], v[0:3], v[0:3], 1.0
// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
v_mfma_f32_32x32x16_f16 a[0:15], a[0:3], a[0:3], 1.0
-// GFX950: v_mfma_f32_32x32x16_f16 v[0:15], v[0:3], v[0:3], v[0:15] blgp:5 ; encoding: [0x00,0x00,0xd5,0xd3,0x00,0x01,0x02,0xa4]
+// GFX950: v_mfma_f32_32x32x16_f16 v[0:15], v[0:3], v[0:3], v[0:15] blgp:5 ; encoding: [0x00,0x00,0xd5,0xd3,0x00,0x01,0x02,0xa4]
// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
v_mfma_f32_32x32x16_f16 v[0:15], v[0:3], v[0:3], v[0:15] blgp:5
@@ -110,3 +110,51 @@ v_mfma_f32_32x32x16_f16 v[0:15], v[0:3], v[0:3], v[0:15] abid:1
// GFX950: v_mfma_f32_32x32x16_f16 a[0:15], a[0:3], a[0:3], a[0:15] cbsz:3 abid:1 ; encoding: [0x00,0x8b,0xd5,0xd3,0x00,0x01,0x02,0x1c]
// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
v_mfma_f32_32x32x16_f16 a[0:15], a[0:3], a[0:3], a[0:15] cbsz:3 abid:1
+
+//===----------------------------------------------------------------------===//
+// v_mfma_f32_32x32x16_bf16
+//===----------------------------------------------------------------------===//
+
+// GFX950: v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15] ; encoding: [0x00,0x00,0xb7,0xd3,0x00,0x01,0x02,0x04]
+// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15]
+
+// GFX950: v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], a[0:15] ; encoding: [0x00,0x80,0xb7,0xd3,0x00,0x01,0x02,0x1c]
+// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], a[0:15]
+
+// GFX950: v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15] ; encoding: [0x00,0x00,0xb7,0xd3,0x00,0x01,0x02,0x04]
+// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+v_mfma_f32_32x32x16bf16 v[0:15], v[0:3], v[0:3], v[0:15]
+
+// GFX950: v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], a[0:15] ; encoding: [0x00,0x80,0xb7,0xd3,0x00,0x01,0x02,0x1c]
+// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+v_mfma_f32_32x32x16bf16 a[0:15], a[0:3], a[0:3], a[0:15]
+
+// GFX950: v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], 1.0 ; encoding: [0x00,0x00,0xb7,0xd3,0x00,0x01,0xca,0x03]
+// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], 1.0
+
+// GFX950: v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], 1.0 ; encoding: [0x00,0x80,0xb7,0xd3,0x00,0x01,0xca,0x1b]
+// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], 1.0
+
+// GFX950: v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15] blgp:5 ; encoding: [0x00,0x00,0xb7,0xd3,0x00,0x01,0x02,0xa4]
+// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15] blgp:5
+
+// GFX950: v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], a[0:15] blgp:2 ; encoding: [0x00,0x80,0xb7,0xd3,0x00,0x01,0x02,0x5c]
+// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], a[0:15] blgp:2
+
+// GFX950: v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15] cbsz:3 ; encoding: [0x00,0x03,0xb7,0xd3,0x00,0x01,0x02,0x04]
+// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15] cbsz:3
+
+// GFX950: v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15] abid:1 ; encoding: [0x00,0x08,0xb7,0xd3,0x00,0x01,0x02,0x04]
+// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15] abid:1
+
+// GFX950: v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], a[0:15] cbsz:3 abid:1 ; encoding: [0x00,0x8b,0xb7,0xd3,0x00,0x01,0x02,0x1c]
+// ERR: :[[@LINE+1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], a[0:15] cbsz:3 abid:1
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx950_mai.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx950_mai.txt
index 68b52672b445de..292f2a348df2ef 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx950_mai.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx950_mai.txt
@@ -59,3 +59,30 @@
# GFX950: v_mfma_f32_32x32x16_f16 v[0:15], v[0:3], v[0:3], v[0:15] cbsz:3 ; encoding: [0x00,0x03,0xd5,0xd3,0x00,0x01,0x02,0x04]
0x00,0x03,0xd5,0xd3,0x00,0x01,0x02,0x04
+
+# GFX950: v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], 1.0 ; encoding: [0x00,0x80,0xb7,0xd3,0x00,0x01,0xca,0x1b]
+0x00,0x80,0xb7,0xd3,0x00,0x01,0xca,0x1b
+
+# GFX950: v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], a[0:15] ; encoding: [0x00,0x80,0xb7,0xd3,0x00,0x01,0x02,0x1c]
+0x00,0x80,0xb7,0xd3,0x00,0x01,0x02,0x1c
+
+# GFX950: v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], a[0:15] blgp:2 ; encoding: [0x00,0x80,0xb7,0xd3,0x00,0x01,0x02,0x5c]
+0x00,0x80,0xb7,0xd3,0x00,0x01,0x02,0x5c
+
+# GFX950: v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], a[0:15] cbsz:3 abid:1 ; encoding: [0x00,0x8b,0xb7,0xd3,0x00,0x01,0x02,0x1c]
+0x00,0x8b,0xb7,0xd3,0x00,0x01,0x02,0x1c
+
+# GFX950: v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], 1.0 ; encoding: [0x00,0x00,0xb7,0xd3,0x00,0x01,0xca,0x03]
+0x00,0x00,0xb7,0xd3,0x00,0x01,0xca,0x03
+
+# GFX950: v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15] ; encoding: [0x00,0x00,0xb7,0xd3,0x00,0x01,0x02,0x04]
+0x00,0x00,0xb7,0xd3,0x00,0x01,0x02,0x04
+
+# GFX950: v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15] abid:1 ; encoding: [0x00,0x08,0xb7,0xd3,0x00,0x01,0x02,0x04]
+0x00,0x08,0xb7,0xd3,0x00,0x01,0x02,0x04
+
+# GFX950: v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15] blgp:5 ; encoding: [0x00,0x00,0xb7,0xd3,0x00,0x01,0x02,0xa4]
+0x00,0x00,0xb7,0xd3,0x00,0x01,0x02,0xa4
+
+# GFX950: v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15] cbsz:3 ; encoding: [0x00,0x03,0xb7,0xd3,0x00,0x01,0x02,0x04]
+0x00,0x03,0xb7,0xd3,0x00,0x01,0x02,0x04
diff --git a/llvm/test/tools/llvm-mca/AMDGPU/gfx950.s b/llvm/test/tools/llvm-mca/AMDGPU/gfx950.s
index 66affe8f930afb..667fb7d78a87bd 100644
--- a/llvm/test/tools/llvm-mca/AMDGPU/gfx950.s
+++ b/llvm/test/tools/llvm-mca/AMDGPU/gfx950.s
@@ -1,18 +1,22 @@
# RUN: llvm-mca -mtriple=amdgcn -mcpu=gfx950 --timeline --iterations=1 --timeline-max-cycles=0 < %s | FileCheck %s
# CHECK: Iterations: 1
-# CHECK: Instructions: 4
-# CHECK: Total Cycles: 25
-# CHECK: Total uOps: 4
+# CHECK: Instructions: 6
+# CHECK: Total Cycles: 41
+# CHECK: Total uOps: 6
v_mfma_f32_16x16x32_f16 a[0:3], a[0:3], a[0:3], a[0:3] blgp:1
v_mfma_f32_16x16x32_f16 a[0:3], v[0:3], v[0:3], a[4:7]
v_mfma_f32_32x32x16_f16 v[0:15], v[0:3], v[0:3], v[0:15]
v_mfma_f32_32x32x16_f16 a[0:15], a[0:3], a[0:3], a[0:15] blgp:2
+v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15]
+v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], a[0:15] blgp:2
# CHECK: [0] [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: - - - - - - 4.00 v_mfma_f32_16x16x32_f16 a[0:3], a[0:3], a[0:3], a[0:3] blgp:1
# CHECK-NEXT: - - - - - - 4.00 v_mfma_f32_16x16x32_f16 a[0:3], v[0:3], v[0:3], a[4:7]
# CHECK-NEXT: - - - - - - 8.00 v_mfma_f32_32x32x16_f16 v[0:15], v[0:3], v[0:3], v[0:15]
# CHECK-NEXT: - - - - - - 8.00 v_mfma_f32_32x32x16_f16 a[0:15], a[0:3], a[0:3], a[0:15] blgp:2
+# CHECK-NEXT: - - - - - - 8.00 v_mfma_f32_32x32x16_bf16 v[0:15], v[0:3], v[0:3], v[0:15]
+# CHECK-NEXT: - - - - - - 8.00 v_mfma_f32_32x32x16_bf16 a[0:15], a[0:3], a[0:3], a[0:15] blgp:2
>From 50224bd5ba009f02a012e22c0f87eba0028d6d88 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Mon, 18 Nov 2024 21:58:02 -0800
Subject: [PATCH 09/22] AMDGPU: Handle gfx950 global_load_lds_* instructions
(#116680)
Define global_load_lds_dwordx3 and global_load_lds_dwordx4.
Oddly, it seems dwordx2 was skipped.
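For illustration only (not part of the patch): a minimal IR sketch of the
intrinsic these instructions back, following the declaration used in the new
test file below; the function name is invented.

declare void @llvm.amdgcn.global.load.lds(ptr addrspace(1) nocapture %gptr, ptr addrspace(3) nocapture %lptr, i32 %size, i32 %offset, i32 %aux)

; With -mcpu=gfx950, a data byte size of 12 or 16 now selects the new
; global_load_lds_dwordx3 / global_load_lds_dwordx4 instructions; other
; targets reject these sizes during instruction selection.
define amdgpu_ps void @copy_16b_to_lds(ptr addrspace(1) %gptr, ptr addrspace(3) %lptr) {
  call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %gptr, ptr addrspace(3) %lptr, i32 16, i32 0, i32 0)
  ret void
}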
---
llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 2 +-
.../AMDGPU/AMDGPUInstructionSelector.cpp | 10 ++
llvm/lib/Target/AMDGPU/FLATInstructions.td | 9 ++
llvm/lib/Target/AMDGPU/GCNSubtarget.h | 7 +
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 10 ++
.../llvm.amdgcn.global.load.lds.gfx950.ll | 137 ++++++++++++++++++
llvm/test/MC/AMDGPU/gfx950_asm_features.s | 37 +++++
llvm/test/MC/Disassembler/AMDGPU/gfx950.txt | 25 ++++
8 files changed, 236 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.lds.gfx950.ll
create mode 100644 llvm/test/MC/AMDGPU/gfx950_asm_features.s
create mode 100644 llvm/test/MC/Disassembler/AMDGPU/gfx950.txt
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 15f33cdbf92e6e..f43ab50d2ea441 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -2452,7 +2452,7 @@ class AMDGPUGlobalLoadLDS :
[],
[LLVMQualPointerType<1>, // Base global pointer to load from
LLVMQualPointerType<3>, // LDS base pointer to store to
- llvm_i32_ty, // Data byte size: 1/2/4
+ llvm_i32_ty, // Data byte size: 1/2/4 (/12/16 for gfx950)
llvm_i32_ty, // imm offset (applied to both global and LDS address)
llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = sc0,
// bit 1 = sc1,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 13de93e829fab2..a6ef0069f134bd 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -3329,6 +3329,16 @@ bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const{
case 4:
Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
break;
+ case 12:
+ if (!Subtarget->hasLDSLoadB96_B128())
+ return false;
+ Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORDX3;
+ break;
+ case 16:
+ if (!Subtarget->hasLDSLoadB96_B128())
+ return false;
+ Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORDX4;
+ break;
}
MachineBasicBlock *MBB = MI.getParent();
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index db74372e9db452..861fcf017d9e4d 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -934,6 +934,11 @@ defm GLOBAL_LOAD_LDS_USHORT : FLAT_Global_Load_LDS_Pseudo <"global_load_lds_usho
defm GLOBAL_LOAD_LDS_SSHORT : FLAT_Global_Load_LDS_Pseudo <"global_load_lds_sshort">;
defm GLOBAL_LOAD_LDS_DWORD : FLAT_Global_Load_LDS_Pseudo <"global_load_lds_dword">;
+let SubtargetPredicate = HasGFX950Insts in {
+defm GLOBAL_LOAD_LDS_DWORDX3 : FLAT_Global_Load_LDS_Pseudo <"global_load_lds_dwordx3">;
+defm GLOBAL_LOAD_LDS_DWORDX4 : FLAT_Global_Load_LDS_Pseudo <"global_load_lds_dwordx4">;
+}
+
let SubtargetPredicate = isGFX12Plus in {
defm GLOBAL_ATOMIC_COND_SUB_U32 : FLAT_Global_Atomic_Pseudo <"global_atomic_cond_sub_u32", VGPR_32, i32>;
defm GLOBAL_ATOMIC_ORDERED_ADD_B64 : FLAT_Global_Atomic_Pseudo <"global_atomic_ordered_add_b64", VReg_64, i64>;
@@ -1980,6 +1985,10 @@ defm GLOBAL_LOAD_LDS_USHORT : FLAT_Real_AllAddr_LDS <0x028, 0x12>;
defm GLOBAL_LOAD_LDS_SSHORT : FLAT_Real_AllAddr_LDS <0x029, 0x13>;
defm GLOBAL_LOAD_LDS_DWORD : FLAT_Real_AllAddr_LDS <0x02a, 0x14>;
+defm GLOBAL_LOAD_LDS_DWORDX3 : FLAT_Real_AllAddr_LDS <0x07e, 0x07e>;
+defm GLOBAL_LOAD_LDS_DWORDX4 : FLAT_Real_AllAddr_LDS <0x07d, 0x07d>;
+
+
defm GLOBAL_ATOMIC_SWAP : FLAT_Global_Real_Atomics_vi <0x40>;
defm GLOBAL_ATOMIC_CMPSWAP : FLAT_Global_Real_Atomics_vi <0x41>;
defm GLOBAL_ATOMIC_ADD : FLAT_Global_Real_Atomics_vi <0x42>;
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index 4a6efe533230b1..f3f96940c1f44b 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -1289,6 +1289,13 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
// hasGFX940Insts and hasGFX90AInsts are also true.
bool hasGFX950Insts() const { return GFX950Insts; }
+ /// Returns true if the target supports
+ /// global_load_lds_dwordx3/global_load_lds_dwordx4 or
+ /// buffer_load_dwordx3/buffer_load_dwordx4 with the lds bit.
+ bool hasLDSLoadB96_B128() const {
+ return hasGFX950Insts();
+ }
+
bool hasSALUFloatInsts() const { return HasSALUFloatInsts; }
bool hasPseudoScalarTrans() const { return HasPseudoScalarTrans; }
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index ad89812558d25c..0f7764906527d0 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -9894,6 +9894,16 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
case 4:
Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
break;
+ case 12:
+ if (!Subtarget->hasLDSLoadB96_B128())
+ return SDValue();
+ Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORDX3;
+ break;
+ case 16:
+ if (!Subtarget->hasLDSLoadB96_B128())
+ return SDValue();
+ Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORDX4;
+ break;
}
auto *M = cast<MemSDNode>(Op);
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.lds.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.lds.gfx950.ll
new file mode 100644
index 00000000000000..b7819ea0431588
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.lds.gfx950.ll
@@ -0,0 +1,137 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -check-prefixes=GFX950,GFX950-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -check-prefixes=GFX950,GFX950-GISEL %s
+
+declare void @llvm.amdgcn.global.load.lds(ptr addrspace(1) nocapture %gptr, ptr addrspace(3) nocapture %lptr, i32 %size, i32 %offset, i32 %aux)
+
+;---------------------------------------------------------------------
+; dwordx3
+;---------------------------------------------------------------------
+
+define amdgpu_ps void @global_load_lds_dwordx3_vaddr(ptr addrspace(1) nocapture %gptr, ptr addrspace(3) nocapture %lptr) {
+; GFX950-SDAG-LABEL: global_load_lds_dwordx3_vaddr:
+; GFX950-SDAG: ; %bb.0:
+; GFX950-SDAG-NEXT: v_readfirstlane_b32 s0, v2
+; GFX950-SDAG-NEXT: s_mov_b32 m0, s0
+; GFX950-SDAG-NEXT: s_nop 0
+; GFX950-SDAG-NEXT: global_load_lds_dwordx3 v[0:1], off offset:16 sc0
+; GFX950-SDAG-NEXT: s_endpgm
+;
+; GFX950-GISEL-LABEL: global_load_lds_dwordx3_vaddr:
+; GFX950-GISEL: ; %bb.0:
+; GFX950-GISEL-NEXT: v_readfirstlane_b32 m0, v2
+; GFX950-GISEL-NEXT: s_nop 4
+; GFX950-GISEL-NEXT: global_load_lds_dwordx3 v[0:1], off offset:16 sc0
+; GFX950-GISEL-NEXT: s_endpgm
+ call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %gptr, ptr addrspace(3) %lptr, i32 12, i32 16, i32 1)
+ ret void
+}
+
+define amdgpu_ps void @global_load_lds_dwordx3_saddr(ptr addrspace(1) nocapture inreg %gptr, ptr addrspace(3) nocapture %lptr) {
+; GFX950-SDAG-LABEL: global_load_lds_dwordx3_saddr:
+; GFX950-SDAG: ; %bb.0:
+; GFX950-SDAG-NEXT: v_readfirstlane_b32 s2, v0
+; GFX950-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX950-SDAG-NEXT: s_mov_b32 m0, s2
+; GFX950-SDAG-NEXT: s_nop 0
+; GFX950-SDAG-NEXT: global_load_lds_dwordx3 v1, s[0:1] offset:32 nt
+; GFX950-SDAG-NEXT: s_endpgm
+;
+; GFX950-GISEL-LABEL: global_load_lds_dwordx3_saddr:
+; GFX950-GISEL: ; %bb.0:
+; GFX950-GISEL-NEXT: v_readfirstlane_b32 m0, v0
+; GFX950-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX950-GISEL-NEXT: s_nop 3
+; GFX950-GISEL-NEXT: global_load_lds_dwordx3 v0, s[0:1] offset:32 nt
+; GFX950-GISEL-NEXT: s_endpgm
+ call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %gptr, ptr addrspace(3) %lptr, i32 12, i32 32, i32 2)
+ ret void
+}
+
+define amdgpu_ps void @global_load_lds_dwordx3_saddr_and_vaddr(ptr addrspace(1) nocapture inreg %gptr, ptr addrspace(3) nocapture %lptr, i32 %voffset) {
+; GFX950-SDAG-LABEL: global_load_lds_dwordx3_saddr_and_vaddr:
+; GFX950-SDAG: ; %bb.0:
+; GFX950-SDAG-NEXT: v_readfirstlane_b32 s2, v0
+; GFX950-SDAG-NEXT: s_mov_b32 m0, s2
+; GFX950-SDAG-NEXT: s_nop 0
+; GFX950-SDAG-NEXT: global_load_lds_dwordx3 v1, s[0:1] offset:48 sc1
+; GFX950-SDAG-NEXT: s_endpgm
+;
+; GFX950-GISEL-LABEL: global_load_lds_dwordx3_saddr_and_vaddr:
+; GFX950-GISEL: ; %bb.0:
+; GFX950-GISEL-NEXT: v_readfirstlane_b32 m0, v0
+; GFX950-GISEL-NEXT: s_nop 4
+; GFX950-GISEL-NEXT: global_load_lds_dwordx3 v1, s[0:1] offset:48 sc1
+; GFX950-GISEL-NEXT: s_endpgm
+ %voffset.64 = zext i32 %voffset to i64
+ %gep = getelementptr i8, ptr addrspace(1) %gptr, i64 %voffset.64
+ call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %gep, ptr addrspace(3) %lptr, i32 12, i32 48, i32 16)
+ ret void
+}
+
+;---------------------------------------------------------------------
+; dwordx4
+;---------------------------------------------------------------------
+
+define amdgpu_ps void @global_load_lds_dwordx4_vaddr(ptr addrspace(1) nocapture %gptr, ptr addrspace(3) nocapture %lptr) {
+; GFX950-SDAG-LABEL: global_load_lds_dwordx4_vaddr:
+; GFX950-SDAG: ; %bb.0:
+; GFX950-SDAG-NEXT: v_readfirstlane_b32 s0, v2
+; GFX950-SDAG-NEXT: s_mov_b32 m0, s0
+; GFX950-SDAG-NEXT: s_nop 0
+; GFX950-SDAG-NEXT: global_load_lds_dwordx4 v[0:1], off offset:16 sc0
+; GFX950-SDAG-NEXT: s_endpgm
+;
+; GFX950-GISEL-LABEL: global_load_lds_dwordx4_vaddr:
+; GFX950-GISEL: ; %bb.0:
+; GFX950-GISEL-NEXT: v_readfirstlane_b32 m0, v2
+; GFX950-GISEL-NEXT: s_nop 4
+; GFX950-GISEL-NEXT: global_load_lds_dwordx4 v[0:1], off offset:16 sc0
+; GFX950-GISEL-NEXT: s_endpgm
+ call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %gptr, ptr addrspace(3) %lptr, i32 16, i32 16, i32 1)
+ ret void
+}
+
+define amdgpu_ps void @global_load_lds_dwordx4_saddr(ptr addrspace(1) nocapture inreg %gptr, ptr addrspace(3) nocapture %lptr) {
+; GFX950-SDAG-LABEL: global_load_lds_dwordx4_saddr:
+; GFX950-SDAG: ; %bb.0:
+; GFX950-SDAG-NEXT: v_readfirstlane_b32 s2, v0
+; GFX950-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX950-SDAG-NEXT: s_mov_b32 m0, s2
+; GFX950-SDAG-NEXT: s_nop 0
+; GFX950-SDAG-NEXT: global_load_lds_dwordx4 v1, s[0:1] offset:32 nt
+; GFX950-SDAG-NEXT: s_endpgm
+;
+; GFX950-GISEL-LABEL: global_load_lds_dwordx4_saddr:
+; GFX950-GISEL: ; %bb.0:
+; GFX950-GISEL-NEXT: v_readfirstlane_b32 m0, v0
+; GFX950-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX950-GISEL-NEXT: s_nop 3
+; GFX950-GISEL-NEXT: global_load_lds_dwordx4 v0, s[0:1] offset:32 nt
+; GFX950-GISEL-NEXT: s_endpgm
+ call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %gptr, ptr addrspace(3) %lptr, i32 16, i32 32, i32 2)
+ ret void
+}
+
+define amdgpu_ps void @global_load_lds_dwordx4_saddr_and_vaddr(ptr addrspace(1) nocapture inreg %gptr, ptr addrspace(3) nocapture %lptr, i32 %voffset) {
+; GFX950-SDAG-LABEL: global_load_lds_dwordx4_saddr_and_vaddr:
+; GFX950-SDAG: ; %bb.0:
+; GFX950-SDAG-NEXT: v_readfirstlane_b32 s2, v0
+; GFX950-SDAG-NEXT: s_mov_b32 m0, s2
+; GFX950-SDAG-NEXT: s_nop 0
+; GFX950-SDAG-NEXT: global_load_lds_dwordx4 v1, s[0:1] offset:48 sc1
+; GFX950-SDAG-NEXT: s_endpgm
+;
+; GFX950-GISEL-LABEL: global_load_lds_dwordx4_saddr_and_vaddr:
+; GFX950-GISEL: ; %bb.0:
+; GFX950-GISEL-NEXT: v_readfirstlane_b32 m0, v0
+; GFX950-GISEL-NEXT: s_nop 4
+; GFX950-GISEL-NEXT: global_load_lds_dwordx4 v1, s[0:1] offset:48 sc1
+; GFX950-GISEL-NEXT: s_endpgm
+ %voffset.64 = zext i32 %voffset to i64
+ %gep = getelementptr i8, ptr addrspace(1) %gptr, i64 %voffset.64
+ call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %gep, ptr addrspace(3) %lptr, i32 16, i32 48, i32 16)
+ ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX950: {{.*}}
diff --git a/llvm/test/MC/AMDGPU/gfx950_asm_features.s b/llvm/test/MC/AMDGPU/gfx950_asm_features.s
new file mode 100644
index 00000000000000..405d152c93d867
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx950_asm_features.s
@@ -0,0 +1,37 @@
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx950 -show-encoding %s | FileCheck --check-prefix=GFX950 --strict-whitespace %s
+// xUN: not llvm-mc -triple=amdgcn -mcpu=gfx940 %s 2>&1 | FileCheck --check-prefixes=NOT-GFX950,GFX940 --implicit-check-not=error: %s
+// xUN: not llvm-mc -triple=amdgcn -mcpu=gfx90a %s 2>&1 | FileCheck --check-prefixes=NOT-GFX950,GFX90A --implicit-check-not=error: %s
+// xUN: not llvm-mc -triple=amdgcn -mcpu=gfx1010 %s 2>&1 | FileCheck --check-prefixes=NOT-GFX950,GFX10 --implicit-check-not=error: %s
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX950: global_load_lds_dwordx3 v[2:3], off ; encoding: [0x00,0x80,0xf8,0xdd,0x02,0x00,0x7f,0x00]
+
+global_load_lds_dwordx3 v[2:3], off
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: global_load_lds_dwordx3 v[2:3], off sc0 nt sc1 ; encoding: [0x00,0x80,0xfb,0xdf,0x02,0x00,0x7f,0x00]
+global_load_lds_dwordx3 v[2:3], off sc0 nt sc1
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: global_load_lds_dwordx3 v[2:3], off offset:4 ; encoding: [0x04,0x80,0xf8,0xdd,0x02,0x00,0x7f,0x00]
+global_load_lds_dwordx3 v[2:3], off offset:4
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: global_load_lds_dwordx3 v2, s[4:5] offset:4 ; encoding: [0x04,0x80,0xf8,0xdd,0x02,0x00,0x04,0x00]
+global_load_lds_dwordx3 v2, s[4:5] offset:4
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX950: global_load_lds_dwordx4 v[2:3], off ; encoding: [0x00,0x80,0xf4,0xdd,0x02,0x00,0x7f,0x00]
+global_load_lds_dwordx4 v[2:3], off
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: global_load_lds_dwordx4 v[2:3], off sc0 nt sc1 ; encoding: [0x00,0x80,0xf7,0xdf,0x02,0x00,0x7f,0x00]
+global_load_lds_dwordx4 v[2:3], off sc0 nt sc1
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: global_load_lds_dwordx4 v[2:3], off offset:4 ; encoding: [0x04,0x80,0xf4,0xdd,0x02,0x00,0x7f,0x00]
+global_load_lds_dwordx4 v[2:3], off offset:4
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: global_load_lds_dwordx4 v2, s[4:5] offset:4 ; encoding: [0x04,0x80,0xf4,0xdd,0x02,0x00,0x04,0x00]
+global_load_lds_dwordx4 v2, s[4:5] offset:4
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx950.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx950.txt
new file mode 100644
index 00000000000000..a9f28332860ee5
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx950.txt
@@ -0,0 +1,25 @@
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx950 -show-encoding -disassemble %s | FileCheck -check-prefix=GFX950 %s
+
+# GFX950: global_load_lds_dwordx3 v2, s[4:5] offset:4 ; encoding: [0x04,0x80,0xf8,0xdd,0x02,0x00,0x04,0x00]
+0x04,0x80,0xf8,0xdd,0x02,0x00,0x04,0x00
+
+# GFX950: global_load_lds_dwordx3 v[2:3], off ; encoding: [0x00,0x80,0xf8,0xdd,0x02,0x00,0x7f,0x00]
+0x00,0x80,0xf8,0xdd,0x02,0x00,0x7f,0x00
+
+# GFX950: global_load_lds_dwordx3 v[2:3], off offset:4 ; encoding: [0x04,0x80,0xf8,0xdd,0x02,0x00,0x7f,0x00]
+0x04,0x80,0xf8,0xdd,0x02,0x00,0x7f,0x00
+
+# GFX950: global_load_lds_dwordx3 v[2:3], off sc0 nt sc1 ; encoding: [0x00,0x80,0xfb,0xdf,0x02,0x00,0x7f,0x00]
+0x00,0x80,0xfb,0xdf,0x02,0x00,0x7f,0x00
+
+# GFX950: global_load_lds_dwordx4 v2, s[4:5] offset:4 ; encoding: [0x04,0x80,0xf4,0xdd,0x02,0x00,0x04,0x00]
+0x04,0x80,0xf4,0xdd,0x02,0x00,0x04,0x00
+
+# GFX950: global_load_lds_dwordx4 v[2:3], off ; encoding: [0x00,0x80,0xf4,0xdd,0x02,0x00,0x7f,0x00]
+0x00,0x80,0xf4,0xdd,0x02,0x00,0x7f,0x00
+
+# GFX950: global_load_lds_dwordx4 v[2:3], off offset:4 ; encoding: [0x04,0x80,0xf4,0xdd,0x02,0x00,0x7f,0x00]
+0x04,0x80,0xf4,0xdd,0x02,0x00,0x7f,0x00
+
+# GFX950: global_load_lds_dwordx4 v[2:3], off sc0 nt sc1 ; encoding: [0x00,0x80,0xf7,0xdf,0x02,0x00,0x7f,0x00]
+0x00,0x80,0xf7,0xdf,0x02,0x00,0x7f,0x00
>From 927032807dfdca5d94eb0a8707d38b605e95e407 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Mon, 18 Nov 2024 22:01:56 -0800
Subject: [PATCH 10/22] AMDGPU: Handle gfx950 96/128-bit buffer_load_lds
(#116681)
Enforcing this limit in the clang builtin will come later.
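For illustration only (not part of the patch): a minimal IR sketch of a
96-bit raw-buffer-to-LDS load, mirroring the declaration and calls in the
new tests below; the function name is invented.

declare void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) nocapture, i32 %size, i32 %voffset, i32 %soffset, i32 %offset, i32 %aux)

; With -mcpu=gfx950, size 12 selects buffer_load_dwordx3 with the lds bit
; and size 16 selects buffer_load_dwordx4; other targets fail to select.
define amdgpu_ps void @copy_12b_buffer_to_lds(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds) {
  call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 12, i32 0, i32 0, i32 0, i32 0)
  ret void
}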
---
llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 8 +-
.../AMDGPU/AMDGPUInstructionSelector.cpp | 18 ++
llvm/lib/Target/AMDGPU/BUFInstructions.td | 24 ++-
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 16 ++
.../llvm.amdgcn.global.load.lds.gfx950.ll | 8 +
...m.amdgcn.raw.ptr.buffer.load.lds.gfx950.ll | 176 ++++++++++++++++
...mdgcn.struct.ptr.buffer.load.lds.gfx950.ll | 196 ++++++++++++++++++
llvm/test/MC/AMDGPU/mubuf-gfx950.s | 32 +++
llvm/test/MC/Disassembler/AMDGPU/gfx950.txt | 19 ++
9 files changed, 485 insertions(+), 12 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.lds.gfx950.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.lds.gfx950.ll
create mode 100644 llvm/test/MC/AMDGPU/mubuf-gfx950.s
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index f43ab50d2ea441..360af786c5160d 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1674,7 +1674,7 @@ class AMDGPURawBufferLoadLDS : Intrinsic <
[],
[llvm_v4i32_ty, // rsrc(SGPR)
LLVMQualPointerType<3>, // LDS base offset
- llvm_i32_ty, // Data byte size: 1/2/4
+ llvm_i32_ty, // Data byte size: 1/2/4 (/12/16 for gfx950)
llvm_i32_ty, // voffset(VGPR, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty, // imm offset(imm, included in bounds checking and swizzling)
@@ -1693,7 +1693,7 @@ class AMDGPURawPtrBufferLoadLDS : Intrinsic <
[],
[AMDGPUBufferRsrcTy, // rsrc(SGPR)
LLVMQualPointerType<3>, // LDS base offset
- llvm_i32_ty, // Data byte size: 1/2/4
+ llvm_i32_ty, // Data byte size: 1/2/4 (/12/16 for gfx950)
llvm_i32_ty, // voffset(VGPR, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty, // imm offset(imm, included in bounds checking and swizzling)
@@ -1715,7 +1715,7 @@ class AMDGPUStructBufferLoadLDS : Intrinsic <
[],
[llvm_v4i32_ty, // rsrc(SGPR)
LLVMQualPointerType<3>, // LDS base offset
- llvm_i32_ty, // Data byte size: 1/2/4
+ llvm_i32_ty, // Data byte size: 1/2/4 (/12/16 for gfx950)
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // voffset(VGPR, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1735,7 +1735,7 @@ class AMDGPUStructPtrBufferLoadLDS : Intrinsic <
[],
[AMDGPUBufferRsrcTy, // rsrc(SGPR)
LLVMQualPointerType<3>, // LDS base offset
- llvm_i32_ty, // Data byte size: 1/2/4
+ llvm_i32_ty, // Data byte size: 1/2/4 (/12/16 for gfx950)
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // voffset(VGPR, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index a6ef0069f134bd..3522ece24f1c45 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -3240,6 +3240,24 @@ bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
: HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
: AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
break;
+ case 12:
+ if (!Subtarget->hasLDSLoadB96_B128())
+ return false;
+
+ Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORDX3_LDS_BOTHEN
+ : AMDGPU::BUFFER_LOAD_DWORDX3_LDS_IDXEN
+ : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORDX3_LDS_OFFEN
+ : AMDGPU::BUFFER_LOAD_DWORDX3_LDS_OFFSET;
+ break;
+ case 16:
+ if (!Subtarget->hasLDSLoadB96_B128())
+ return false;
+
+ Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORDX4_LDS_BOTHEN
+ : AMDGPU::BUFFER_LOAD_DWORDX4_LDS_IDXEN
+ : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORDX4_LDS_OFFEN
+ : AMDGPU::BUFFER_LOAD_DWORDX4_LDS_OFFSET;
+ break;
}
MachineBasicBlock *MBB = MI.getParent();
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index 79d6a825f60b03..7283733dea22db 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -573,9 +573,17 @@ multiclass MUBUF_Pseudo_Loads<string opName, ValueType load_vt = i32,
}
}
-multiclass MUBUF_Pseudo_Loads_Lds<string opName, ValueType load_vt = i32> {
+multiclass MUBUF_Pseudo_Loads_Lds<string opName, ValueType load_vt = i32, Predicate LDSPred = TruePredicate> {
defm NAME : MUBUF_Pseudo_Loads<opName, load_vt>;
- defm _LDS : MUBUF_Pseudo_Loads<opName, load_vt, 0, 1>;
+
+ if !ne(LDSPred, TruePredicate) then {
+ let SubtargetPredicate = LDSPred in {
+ defm _LDS : MUBUF_Pseudo_Loads<opName, load_vt, 0, 1>;
+ }
+ } else {
+ defm _LDS : MUBUF_Pseudo_Loads<opName, load_vt, 0, 1>;
+ }
+
}
multiclass MUBUF_Pseudo_Loads_LDSOpc<string opName,
@@ -956,11 +964,11 @@ defm BUFFER_LOAD_DWORD : MUBUF_Pseudo_Loads_Lds <
defm BUFFER_LOAD_DWORDX2 : MUBUF_Pseudo_Loads <
"buffer_load_dwordx2", v2i32
>;
-defm BUFFER_LOAD_DWORDX3 : MUBUF_Pseudo_Loads <
- "buffer_load_dwordx3", v3i32
+defm BUFFER_LOAD_DWORDX3 : MUBUF_Pseudo_Loads_Lds <
+ "buffer_load_dwordx3", v3i32, /*LDSPred=*/HasGFX950Insts
>;
-defm BUFFER_LOAD_DWORDX4 : MUBUF_Pseudo_Loads <
- "buffer_load_dwordx4", v4i32
+defm BUFFER_LOAD_DWORDX4 : MUBUF_Pseudo_Loads_Lds <
+ "buffer_load_dwordx4", v4i32, /*LDSPred=*/HasGFX950Insts
>;
defm BUFFER_LOAD_LDS_B32 : MUBUF_Pseudo_Loads_LDSOpc <
@@ -3231,8 +3239,8 @@ defm BUFFER_LOAD_USHORT : MUBUF_Real_AllAddr_Lds_vi <0x12>;
defm BUFFER_LOAD_SSHORT : MUBUF_Real_AllAddr_Lds_vi <0x13>;
defm BUFFER_LOAD_DWORD : MUBUF_Real_AllAddr_Lds_vi <0x14>;
defm BUFFER_LOAD_DWORDX2 : MUBUF_Real_AllAddr_vi <0x15>;
-defm BUFFER_LOAD_DWORDX3 : MUBUF_Real_AllAddr_vi <0x16>;
-defm BUFFER_LOAD_DWORDX4 : MUBUF_Real_AllAddr_vi <0x17>;
+defm BUFFER_LOAD_DWORDX3 : MUBUF_Real_AllAddr_Lds_vi <0x16>;
+defm BUFFER_LOAD_DWORDX4 : MUBUF_Real_AllAddr_Lds_vi <0x17>;
defm BUFFER_STORE_BYTE : MUBUF_Real_AllAddr_vi <0x18>;
defm BUFFER_STORE_BYTE_D16_HI : MUBUF_Real_AllAddr_vi <0x19>;
defm BUFFER_STORE_SHORT : MUBUF_Real_AllAddr_vi <0x1a>;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 0f7764906527d0..5b02f9bf80d3fc 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -9825,6 +9825,22 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
: HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
: AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
break;
+ case 12:
+ if (!Subtarget->hasLDSLoadB96_B128())
+ return SDValue();
+ Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORDX3_LDS_BOTHEN
+ : AMDGPU::BUFFER_LOAD_DWORDX3_LDS_IDXEN
+ : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORDX3_LDS_OFFEN
+ : AMDGPU::BUFFER_LOAD_DWORDX3_LDS_OFFSET;
+ break;
+ case 16:
+ if (!Subtarget->hasLDSLoadB96_B128())
+ return SDValue();
+ Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORDX4_LDS_BOTHEN
+ : AMDGPU::BUFFER_LOAD_DWORDX4_LDS_IDXEN
+ : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORDX4_LDS_OFFEN
+ : AMDGPU::BUFFER_LOAD_DWORDX4_LDS_OFFSET;
+ break;
}
SDValue M0Val = copyToM0(DAG, Chain, DL, Op.getOperand(3));
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.lds.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.lds.gfx950.ll
index b7819ea0431588..8f67375a09cb72 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.lds.gfx950.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.lds.gfx950.ll
@@ -2,6 +2,14 @@
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -check-prefixes=GFX950,GFX950-SDAG %s
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -check-prefixes=GFX950,GFX950-GISEL %s
+; RUN: not --crash llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx940 -filetype=null < %s 2>&1 | FileCheck -check-prefix=ERR-SDAG %s
+; RUN: not --crash llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx940 -filetype=null < %s 2>&1 | FileCheck -check-prefix=ERR-GISEL %s
+
+; ERR-SDAG: LLVM ERROR: Cannot select: intrinsic %llvm.amdgcn.global.load.lds
+
+; ERR-GISEL: LLVM ERROR: cannot select: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.global.load.lds),
+
+
declare void @llvm.amdgcn.global.load.lds(ptr addrspace(1) nocapture %gptr, ptr addrspace(3) nocapture %lptr, i32 %size, i32 %offset, i32 %aux)
;---------------------------------------------------------------------
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.lds.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.lds.gfx950.ll
new file mode 100644
index 00000000000000..58b1d0da4a5f35
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.lds.gfx950.ll
@@ -0,0 +1,176 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -check-prefixes=GFX950,GFX950-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -check-prefixes=GFX950,GFX950-GISEL %s
+; RUN: not --crash llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx940 -filetype=null < %s 2>&1 | FileCheck -check-prefix=ERR-SDAG %s
+; RUN: not --crash llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx940 -filetype=null < %s 2>&1 | FileCheck -check-prefix=ERR-GISEL %s
+
+; FIXME: Not a great error
+; ERR-SDAG: LLVM ERROR: Do not know how to expand this operator's operand!
+; ERR-GISEL: LLVM ERROR: cannot select: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.raw.ptr.buffer.load.lds),
+
+declare void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) nocapture, i32 %size, i32 %voffset, i32 %soffset, i32 %offset, i32 %aux)
+
+;---------------------------------------------------------------------
+; dwordx3
+;---------------------------------------------------------------------
+
+define amdgpu_ps float @buffer_load_lds_dwordx3(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds) {
+; GFX950-LABEL: buffer_load_lds_dwordx3:
+; GFX950: ; %bb.0: ; %main_body
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dword off, s[0:3], 0 lds
+; GFX950-NEXT: buffer_load_dword off, s[0:3], 0 offset:4 sc0 lds
+; GFX950-NEXT: buffer_load_dword off, s[0:3], 0 offset:8 nt lds
+; GFX950-NEXT: v_mov_b32_e32 v0, s4
+; GFX950-NEXT: s_waitcnt vmcnt(0)
+; GFX950-NEXT: ds_read_b32 v0, v0
+; GFX950-NEXT: s_waitcnt lgkmcnt(0)
+; GFX950-NEXT: ; return to shader part epilog
+main_body:
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 4, i32 0, i32 0, i32 0, i32 0)
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 4, i32 0, i32 0, i32 4, i32 1)
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 4, i32 0, i32 0, i32 8, i32 2)
+ %res = load float, ptr addrspace(3) %lds
+ ret float %res
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx3_imm_voffset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds) {
+; GFX950-LABEL: buffer_load_lds_dwordx3_imm_voffset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: v_mov_b32_e32 v0, 0x800
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx3 v0, s[0:3], 0 offen lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 12, i32 2048, i32 0, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx3_v_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %voffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx3_v_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx3 v0, s[0:3], 0 offen lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 12, i32 %voffset, i32 0, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx3_s_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 inreg %soffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx3_s_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx3 off, s[0:3], s5 lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 12, i32 0, i32 %soffset, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx3_vs_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %voffset, i32 inreg %soffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx3_vs_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx3 v0, s[0:3], s5 offen lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 12, i32 %voffset, i32 %soffset, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx3_vs_imm_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %voffset, i32 inreg %soffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx3_vs_imm_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx3 v0, s[0:3], s5 offen offset:2048 lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 12, i32 %voffset, i32 %soffset, i32 2048, i32 0)
+ ret void
+}
+
+;---------------------------------------------------------------------
+; dwordx4
+;---------------------------------------------------------------------
+
+define amdgpu_ps float @buffer_load_lds_dwordx4(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds) {
+; GFX950-LABEL: buffer_load_lds_dwordx4:
+; GFX950: ; %bb.0: ; %main_body
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dword off, s[0:3], 0 lds
+; GFX950-NEXT: buffer_load_dword off, s[0:3], 0 offset:4 sc0 lds
+; GFX950-NEXT: buffer_load_dword off, s[0:3], 0 offset:8 nt lds
+; GFX950-NEXT: v_mov_b32_e32 v0, s4
+; GFX950-NEXT: s_waitcnt vmcnt(0)
+; GFX950-NEXT: ds_read_b32 v0, v0
+; GFX950-NEXT: s_waitcnt lgkmcnt(0)
+; GFX950-NEXT: ; return to shader part epilog
+main_body:
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 4, i32 0, i32 0, i32 0, i32 0)
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 4, i32 0, i32 0, i32 4, i32 1)
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 4, i32 0, i32 0, i32 8, i32 2)
+ %res = load float, ptr addrspace(3) %lds
+ ret float %res
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx4_imm_voffset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds) {
+; GFX950-LABEL: buffer_load_lds_dwordx4_imm_voffset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: v_mov_b32_e32 v0, 0x800
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx4 v0, s[0:3], 0 offen lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 16, i32 2048, i32 0, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx4_v_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %voffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx4_v_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx4 v0, s[0:3], 0 offen lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 16, i32 %voffset, i32 0, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx4_s_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 inreg %soffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx4_s_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx4 off, s[0:3], s5 lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 16, i32 0, i32 %soffset, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx4_vs_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %voffset, i32 inreg %soffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx4_vs_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx4 v0, s[0:3], s5 offen lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 16, i32 %voffset, i32 %soffset, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx4_vs_imm_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %voffset, i32 inreg %soffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx4_vs_imm_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx4 v0, s[0:3], s5 offen offset:2048 lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 16, i32 %voffset, i32 %soffset, i32 2048, i32 0)
+ ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX950-GISEL: {{.*}}
+; GFX950-SDAG: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.lds.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.lds.gfx950.ll
new file mode 100644
index 00000000000000..cfe9545b074e3c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.lds.gfx950.ll
@@ -0,0 +1,196 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -check-prefixes=GFX950,GFX950-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -check-prefixes=GFX950,GFX950-GISEL %s
+; RUN: not --crash llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx940 -filetype=null < %s 2>&1 | FileCheck -check-prefix=ERR-SDAG %s
+; RUN: not --crash llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx940 -filetype=null < %s 2>&1 | FileCheck -check-prefix=ERR-GISEL %s
+
+; ERR-SDAG: LLVM ERROR: Do not know how to expand this operator's operand!
+; ERR-GISEL: LLVM ERROR: cannot select: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.struct.ptr.buffer.load.lds),
+
+declare void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) nocapture, i32 %size, i32 %vindex, i32 %voffset, i32 %soffset, i32 %offset, i32 %aux)
+
+;---------------------------------------------------------------------
+; dwordx3
+;---------------------------------------------------------------------
+
+define amdgpu_ps float @buffer_load_lds_dwordx3(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds) {
+; GFX950-SDAG-LABEL: buffer_load_lds_dwordx3:
+; GFX950-SDAG: ; %bb.0:
+; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, 8
+; GFX950-SDAG-NEXT: s_mov_b32 m0, s4
+; GFX950-SDAG-NEXT: s_nop 0
+; GFX950-SDAG-NEXT: buffer_load_dwordx3 v0, s[0:3], 0 idxen lds
+; GFX950-SDAG-NEXT: buffer_load_dwordx3 v0, s[0:3], 0 idxen offset:4 sc0 lds
+; GFX950-SDAG-NEXT: buffer_load_dwordx3 v0, s[0:3], 0 idxen offset:8 nt lds
+; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, s4
+; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX950-SDAG-NEXT: ds_read_b32 v0, v0
+; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX950-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX950-GISEL-LABEL: buffer_load_lds_dwordx3:
+; GFX950-GISEL: ; %bb.0:
+; GFX950-GISEL-NEXT: s_mov_b32 m0, s4
+; GFX950-GISEL-NEXT: v_mov_b32_e32 v0, 8
+; GFX950-GISEL-NEXT: buffer_load_dwordx3 v0, s[0:3], 0 idxen lds
+; GFX950-GISEL-NEXT: buffer_load_dwordx3 v0, s[0:3], 0 idxen offset:4 sc0 lds
+; GFX950-GISEL-NEXT: buffer_load_dwordx3 v0, s[0:3], 0 idxen offset:8 nt lds
+; GFX950-GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX950-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX950-GISEL-NEXT: ds_read_b32 v0, v0
+; GFX950-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX950-GISEL-NEXT: ; return to shader part epilog
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 12, i32 8, i32 0, i32 0, i32 0, i32 0)
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 12, i32 8, i32 0, i32 0, i32 4, i32 1)
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 12, i32 8, i32 0, i32 0, i32 8, i32 2)
+ %res = load float, ptr addrspace(3) %lds
+ ret float %res
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx3_imm_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %vindex) {
+; GFX950-LABEL: buffer_load_lds_dwordx3_imm_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx3 v0, s[0:3], 0 idxen offset:2048 lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 12, i32 %vindex, i32 0, i32 0, i32 2048, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx3_v_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %vindex, i32 %voffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx3_v_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx3 v[0:1], s[0:3], 0 idxen offen lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 12, i32 %vindex, i32 %voffset, i32 0, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx3_s_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %vindex, i32 inreg %soffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx3_s_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx3 v0, s[0:3], s5 idxen lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 12, i32 %vindex, i32 0, i32 %soffset, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx3_vs_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx3_vs_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx3 v[0:1], s[0:3], s5 idxen offen lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 12, i32 %vindex, i32 %voffset, i32 %soffset, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx3_vs_imm_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx3_vs_imm_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx3 v[0:1], s[0:3], s5 idxen offen offset:2048 lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 12, i32 %vindex, i32 %voffset, i32 %soffset, i32 2048, i32 0)
+ ret void
+}
+
+;---------------------------------------------------------------------
+; dwordx4
+;---------------------------------------------------------------------
+
+define amdgpu_ps float @buffer_load_lds_dwordx4(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds) {
+; GFX950-SDAG-LABEL: buffer_load_lds_dwordx4:
+; GFX950-SDAG: ; %bb.0:
+; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, 8
+; GFX950-SDAG-NEXT: s_mov_b32 m0, s4
+; GFX950-SDAG-NEXT: s_nop 0
+; GFX950-SDAG-NEXT: buffer_load_dwordx4 v0, s[0:3], 0 idxen lds
+; GFX950-SDAG-NEXT: buffer_load_dwordx4 v0, s[0:3], 0 idxen offset:4 sc0 lds
+; GFX950-SDAG-NEXT: buffer_load_dwordx4 v0, s[0:3], 0 idxen offset:8 nt lds
+; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, s4
+; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX950-SDAG-NEXT: ds_read_b32 v0, v0
+; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX950-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX950-GISEL-LABEL: buffer_load_lds_dwordx4:
+; GFX950-GISEL: ; %bb.0:
+; GFX950-GISEL-NEXT: s_mov_b32 m0, s4
+; GFX950-GISEL-NEXT: v_mov_b32_e32 v0, 8
+; GFX950-GISEL-NEXT: buffer_load_dwordx4 v0, s[0:3], 0 idxen lds
+; GFX950-GISEL-NEXT: buffer_load_dwordx4 v0, s[0:3], 0 idxen offset:4 sc0 lds
+; GFX950-GISEL-NEXT: buffer_load_dwordx4 v0, s[0:3], 0 idxen offset:8 nt lds
+; GFX950-GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX950-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX950-GISEL-NEXT: ds_read_b32 v0, v0
+; GFX950-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX950-GISEL-NEXT: ; return to shader part epilog
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 16, i32 8, i32 0, i32 0, i32 0, i32 0)
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 16, i32 8, i32 0, i32 0, i32 4, i32 1)
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 16, i32 8, i32 0, i32 0, i32 8, i32 2)
+ %res = load float, ptr addrspace(3) %lds
+ ret float %res
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx4_imm_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %vindex) {
+; GFX950-LABEL: buffer_load_lds_dwordx4_imm_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx4 v0, s[0:3], 0 idxen offset:2048 lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 16, i32 %vindex, i32 0, i32 0, i32 2048, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx4_v_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %vindex, i32 %voffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx4_v_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx4 v[0:1], s[0:3], 0 idxen offen lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 16, i32 %vindex, i32 %voffset, i32 0, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx4_s_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %vindex, i32 inreg %soffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx4_s_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx4 v0, s[0:3], s5 idxen lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 16, i32 %vindex, i32 0, i32 %soffset, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx4_vs_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx4_vs_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx4 v[0:1], s[0:3], s5 idxen offen lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 16, i32 %vindex, i32 %voffset, i32 %soffset, i32 0, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx4_vs_imm_offset(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) inreg %lds, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+; GFX950-LABEL: buffer_load_lds_dwordx4_vs_imm_offset:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_mov_b32 m0, s4
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: buffer_load_dwordx4 v[0:1], s[0:3], s5 idxen offen offset:2048 lds
+; GFX950-NEXT: s_endpgm
+ call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr addrspace(3) %lds, i32 16, i32 %vindex, i32 %voffset, i32 %soffset, i32 2048, i32 0)
+ ret void
+}
diff --git a/llvm/test/MC/AMDGPU/mubuf-gfx950.s b/llvm/test/MC/AMDGPU/mubuf-gfx950.s
new file mode 100644
index 00000000000000..0ba6f2ca4f6c4e
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/mubuf-gfx950.s
@@ -0,0 +1,32 @@
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx950 -show-encoding %s | FileCheck -check-prefix=GFX950 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx940 -show-encoding %s 2>&1 | FileCheck -check-prefix=ERR %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx803 -show-encoding %s 2>&1 | FileCheck -check-prefix=ERR %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1030 -show-encoding %s 2>&1 | FileCheck -check-prefix=ERR %s
+
+// FIXME: Bad diagnostics on unsupported subtarget
+
+// GFX950: buffer_load_dwordx3 off, s[8:11], s3 lds ; encoding: [0x00,0x00,0x59,0xe0,0x00,0x00,0x02,0x03]
+// ERR: :[[@LINE+1]]:21: error: invalid operand for instruction
+buffer_load_dwordx3 off, s[8:11], s3 lds
+
+// GFX950: buffer_load_dwordx3 off, s[8:11], s3 offset:4095 lds ; encoding: [0xff,0x0f,0x59,0xe0,0x00,0x00,0x02,0x03]
+// ERR: :[[@LINE+1]]:38: error: not a valid operand
+buffer_load_dwordx3 off, s[8:11], s3 offset:4095 lds
+
+// GFX950: buffer_load_dwordx3 v0, s[8:11], s101 offen lds ; encoding: [0x00,0x10,0x59,0xe0,0x00,0x00,0x02,0x65]
+// ERR: :[[@LINE+1]]:39: error: invalid operand for instruction
+buffer_load_dwordx3 v0, s[8:11], s101 offen lds
+
+
+
+// GFX950: buffer_load_dwordx4 off, s[8:11], s3 lds ; encoding: [0x00,0x00,0x5d,0xe0,0x00,0x00,0x02,0x03]
+// ERR: :[[@LINE+1]]:21: error: invalid operand for instruction
+buffer_load_dwordx4 off, s[8:11], s3 lds
+
+// GFX950: buffer_load_dwordx4 off, s[8:11], s3 offset:4095 lds ; encoding: [0xff,0x0f,0x5d,0xe0,0x00,0x00,0x02,0x03]
+// ERR: :[[@LINE+1]]:38: error: not a valid operand
+buffer_load_dwordx4 off, s[8:11], s3 offset:4095 lds
+
+// GFX950: buffer_load_dwordx4 v0, s[8:11], s101 offen lds ; encoding: [0x00,0x10,0x5d,0xe0,0x00,0x00,0x02,0x65]
+// ERR: :[[@LINE+1]]:39: error: invalid operand for instruction
+buffer_load_dwordx4 v0, s[8:11], s101 offen lds
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx950.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx950.txt
index a9f28332860ee5..ce37e228f03fa3 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx950.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx950.txt
@@ -23,3 +23,22 @@
# GFX950: global_load_lds_dwordx4 v[2:3], off sc0 nt sc1 ; encoding: [0x00,0x80,0xf7,0xdf,0x02,0x00,0x7f,0x00]
0x00,0x80,0xf7,0xdf,0x02,0x00,0x7f,0x00
+
+
+# GFX950: buffer_load_dwordx3 off, s[8:11], s3 lds ; encoding: [0x00,0x00,0x59,0xe0,0x00,0x00,0x02,0x03]
+0x00,0x00,0x59,0xe0,0x00,0x00,0x02,0x03
+
+# GFX950: buffer_load_dwordx3 off, s[8:11], s3 offset:4095 lds ; encoding: [0xff,0x0f,0x59,0xe0,0x00,0x00,0x02,0x03]
+0xff,0x0f,0x59,0xe0,0x00,0x00,0x02,0x03
+
+# GFX950: buffer_load_dwordx3 v0, s[8:11], s101 offen lds ; encoding: [0x00,0x10,0x59,0xe0,0x00,0x00,0x02,0x65]
+0x00,0x10,0x59,0xe0,0x00,0x00,0x02,0x65
+
+# GFX950: buffer_load_dwordx4 off, s[8:11], s3 lds ; encoding: [0x00,0x00,0x5d,0xe0,0x00,0x00,0x02,0x03]
+0x00,0x00,0x5d,0xe0,0x00,0x00,0x02,0x03
+
+# GFX950: buffer_load_dwordx4 off, s[8:11], s3 offset:4095 lds ; encoding: [0xff,0x0f,0x5d,0xe0,0x00,0x00,0x02,0x03]
+0xff,0x0f,0x5d,0xe0,0x00,0x00,0x02,0x03
+
+# GFX950: buffer_load_dwordx4 v0, s[8:11], s101 offen lds ; encoding: [0x00,0x10,0x5d,0xe0,0x00,0x00,0x02,0x65]
+0x00,0x10,0x5d,0xe0,0x00,0x00,0x02,0x65
>From 545917cb4b1c122b3626ea8d35fa7f93a44ae27f Mon Sep 17 00:00:00 2001
From: Nathan Ridge <zeratul976 at hotmail.com>
Date: Tue, 19 Nov 2024 01:32:04 -0500
Subject: [PATCH 11/22] [clangd] Harden incomingCalls() against possible
misinterpretation of a range as pertaining to the wrong file (#111616)
`CallHierarchyIncomingCall::fromRanges` are interpreted as ranges in the
same file as the `CallHierarchyItem` representing the caller
(`CallHierarchyIncomingCall::from`).
In C/C++, it's possible for the calls to be in a different file than the caller,
as illustrated in the added test case.
With this patch, such calls are dropped, rather than their ranges being
incorrectly interpreted as pertaining to the wrong file.
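As a minimal illustration (mirroring the added test case), the caller's
definition can be spelled in a header via a macro while the call site
lives in the source file:

  // header.h
  #define WALDO void caller() {

  // source.cpp
  void callee();
  WALDO        // caller() is spelled in the header...
    callee();  // ...but the call site is in source.cpp
  }

Since the protocol can only express fromRanges relative to the caller's
file, this call site is dropped while the caller item itself is kept.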
---
clang-tools-extra/clangd/XRefs.cpp | 21 +++++++++++---
.../clangd/unittests/CallHierarchyTests.cpp | 29 +++++++++++++++++++
2 files changed, 46 insertions(+), 4 deletions(-)
diff --git a/clang-tools-extra/clangd/XRefs.cpp b/clang-tools-extra/clangd/XRefs.cpp
index 4fd11307857ff8..61fa66180376cd 100644
--- a/clang-tools-extra/clangd/XRefs.cpp
+++ b/clang-tools-extra/clangd/XRefs.cpp
@@ -63,6 +63,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <optional>
@@ -2275,7 +2276,7 @@ incomingCalls(const CallHierarchyItem &Item, const SymbolIndex *Index) {
// Initially store the ranges in a map keyed by SymbolID of the caller.
// This allows us to group different calls with the same caller
// into the same CallHierarchyIncomingCall.
- llvm::DenseMap<SymbolID, std::vector<Range>> CallsIn;
+ llvm::DenseMap<SymbolID, std::vector<Location>> CallsIn;
// We can populate the ranges based on a refs request only. As we do so, we
// also accumulate the container IDs into a lookup request.
LookupRequest ContainerLookup;
@@ -2285,7 +2286,7 @@ incomingCalls(const CallHierarchyItem &Item, const SymbolIndex *Index) {
elog("incomingCalls failed to convert location: {0}", Loc.takeError());
return;
}
- CallsIn[R.Container].push_back(Loc->range);
+ CallsIn[R.Container].push_back(*Loc);
ContainerLookup.IDs.insert(R.Container);
});
@@ -2294,9 +2295,21 @@ incomingCalls(const CallHierarchyItem &Item, const SymbolIndex *Index) {
Index->lookup(ContainerLookup, [&](const Symbol &Caller) {
auto It = CallsIn.find(Caller.ID);
assert(It != CallsIn.end());
- if (auto CHI = symbolToCallHierarchyItem(Caller, Item.uri.file()))
+ if (auto CHI = symbolToCallHierarchyItem(Caller, Item.uri.file())) {
+ std::vector<Range> FromRanges;
+ for (const Location &L : It->second) {
+ if (L.uri != CHI->uri) {
+ // Call location not in same file as caller.
+ // This can happen in some edge cases. There's not much we can do,
+ // since the protocol only allows returning ranges interpreted as
+ // being in the caller's file.
+ continue;
+ }
+ FromRanges.push_back(L.range);
+ }
Results.push_back(
- CallHierarchyIncomingCall{std::move(*CHI), std::move(It->second)});
+ CallHierarchyIncomingCall{std::move(*CHI), std::move(FromRanges)});
+ }
});
// Sort results by name of container.
llvm::sort(Results, [](const CallHierarchyIncomingCall &A,
diff --git a/clang-tools-extra/clangd/unittests/CallHierarchyTests.cpp b/clang-tools-extra/clangd/unittests/CallHierarchyTests.cpp
index b2278ff12735dc..8821d3aad9c784 100644
--- a/clang-tools-extra/clangd/unittests/CallHierarchyTests.cpp
+++ b/clang-tools-extra/clangd/unittests/CallHierarchyTests.cpp
@@ -491,6 +491,35 @@ TEST(CallHierarchy, HierarchyOnVar) {
fromRanges(Source.range("Callee")))));
}
+TEST(CallHierarchy, CallInDifferentFileThanCaller) {
+ Annotations Header(R"cpp(
+ #define WALDO void caller() {
+ )cpp");
+ Annotations Source(R"cpp(
+ void call^ee();
+ WALDO
+ callee();
+ }
+ )cpp");
+ auto TU = TestTU::withCode(Source.code());
+ TU.HeaderCode = Header.code();
+ auto AST = TU.build();
+ auto Index = TU.index();
+
+ std::vector<CallHierarchyItem> Items =
+ prepareCallHierarchy(AST, Source.point(), testPath(TU.Filename));
+ ASSERT_THAT(Items, ElementsAre(withName("callee")));
+
+ auto Incoming = incomingCalls(Items[0], Index.get());
+
+ // The only call site is in the source file, which is a different file from
+ // the declaration of the function containing the call, which is in the
+ // header. The protocol does not allow us to represent such calls, so we drop
+ // them. (The call hierarchy item itself is kept.)
+ EXPECT_THAT(Incoming,
+ ElementsAre(AllOf(from(withName("caller")), fromRanges())));
+}
+
} // namespace
} // namespace clangd
} // namespace clang
>From 44a41b0660912a90be903a843e8b6f234fa0a2be Mon Sep 17 00:00:00 2001
From: Nathan Ridge <zeratul976 at hotmail.com>
Date: Tue, 19 Nov 2024 01:47:45 -0500
Subject: [PATCH 12/22] [clangd] Check for other clangd extension capabilities
under 'experimental' (#116531)
This is a follow-up to PR114699, with the same motivation: to support
clients that can only add custom (language-specific or server-specific)
capabilities under 'experimental'.
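For illustration, a sketch of the shape of an `initialize` payload this
now accepts (field names taken from the parsing code below; the values
are examples only):

  {
    "capabilities": {
      "experimental": {
        "textDocument": {
          "references": { "container": true },
          "publishDiagnostics": { "codeActionsInline": true },
          "inactiveRegionsCapabilities": { "inactiveRegions": true }
        },
        "window": { "implicitWorkDoneProgressCreate": true },
        "offsetEncoding": ["utf-8"]
      }
    }
  }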
---
clang-tools-extra/clangd/Protocol.cpp | 29 +++++++++++++++++++++++++++
clang-tools-extra/clangd/Protocol.h | 3 +++
2 files changed, 32 insertions(+)
diff --git a/clang-tools-extra/clangd/Protocol.cpp b/clang-tools-extra/clangd/Protocol.cpp
index 761f96846d4538..05c8041df7de75 100644
--- a/clang-tools-extra/clangd/Protocol.cpp
+++ b/clang-tools-extra/clangd/Protocol.cpp
@@ -511,6 +511,35 @@ bool fromJSON(const llvm::json::Value &Params, ClientCapabilities &R,
if (auto EditsNearCursor = Completion->getBoolean("editsNearCursor"))
R.CompletionFixes |= *EditsNearCursor;
}
+ if (auto *References = TextDocument->getObject("references")) {
+ if (auto ContainerSupport = References->getBoolean("container")) {
+ R.ReferenceContainer |= *ContainerSupport;
+ }
+ }
+ if (auto *Diagnostics = TextDocument->getObject("publishDiagnostics")) {
+ if (auto CodeActions = Diagnostics->getBoolean("codeActionsInline")) {
+ R.DiagnosticFixes |= *CodeActions;
+ }
+ }
+ if (auto *InactiveRegions =
+ TextDocument->getObject("inactiveRegionsCapabilities")) {
+ if (auto InactiveRegionsSupport =
+ InactiveRegions->getBoolean("inactiveRegions")) {
+ R.InactiveRegions |= *InactiveRegionsSupport;
+ }
+ }
+ }
+ if (auto *Window = Experimental->getObject("window")) {
+ if (auto Implicit =
+ Window->getBoolean("implicitWorkDoneProgressCreate")) {
+ R.ImplicitProgressCreation |= *Implicit;
+ }
+ }
+ if (auto *OffsetEncoding = Experimental->get("offsetEncoding")) {
+ R.offsetEncoding.emplace();
+ if (!fromJSON(*OffsetEncoding, *R.offsetEncoding,
+ P.field("offsetEncoding")))
+ return false;
}
}
diff --git a/clang-tools-extra/clangd/Protocol.h b/clang-tools-extra/clangd/Protocol.h
index 5b28095758198d..c7ef1a13e6e39e 100644
--- a/clang-tools-extra/clangd/Protocol.h
+++ b/clang-tools-extra/clangd/Protocol.h
@@ -452,6 +452,7 @@ struct ClientCapabilities {
std::optional<SymbolKindBitset> WorkspaceSymbolKinds;
/// Whether the client accepts diagnostics with codeActions attached inline.
+ /// This is a clangd extension.
/// textDocument.publishDiagnostics.codeActionsInline.
bool DiagnosticFixes = false;
@@ -475,6 +476,7 @@ struct ClientCapabilities {
/// Client supports displaying a container string for results of
/// textDocument/reference (clangd extension)
+ /// textDocument.references.container
bool ReferenceContainer = false;
/// Client supports hierarchical document symbols.
@@ -563,6 +565,7 @@ struct ClientCapabilities {
/// Whether the client supports the textDocument/inactiveRegions
/// notification. This is a clangd extension.
+ /// textDocument.inactiveRegionsCapabilities.inactiveRegions
bool InactiveRegions = false;
};
bool fromJSON(const llvm::json::Value &, ClientCapabilities &,
>From 2e0a3c281b31eeffb1c12b53360f22760e246af2 Mon Sep 17 00:00:00 2001
From: Carlos Alberto Enciso <Carlos.Enciso at sony.com>
Date: Tue, 19 Nov 2024 07:10:36 +0000
Subject: [PATCH 13/22] [DebugInfo] Correct an overly-restrictive REQUIRES
clause. (#116429)
Include a regular expression in the 'REQUIRES' clause, to run
the test on all matching targets (x86_64 *linux*).
The original patch restricted the test just to 'x86_64-linux':
https://github.com/llvm/llvm-project/pull/116327
---
llvm/test/DebugInfo/Generic/artificial-static-member.ll | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/test/DebugInfo/Generic/artificial-static-member.ll b/llvm/test/DebugInfo/Generic/artificial-static-member.ll
index 5c247d6959bf7b..08f15a226a87cf 100644
--- a/llvm/test/DebugInfo/Generic/artificial-static-member.ll
+++ b/llvm/test/DebugInfo/Generic/artificial-static-member.ll
@@ -1,4 +1,4 @@
-; REQUIRES: x86_64-linux
+; REQUIRES: target={{x86_64-.*-linux.*}}
; RUN: llc -O0 -filetype=obj < %s | \
; RUN: llvm-dwarfdump --debug-info - | FileCheck %s
>From 0488d1774b197513cf91d973e103f4e7de293c00 Mon Sep 17 00:00:00 2001
From: Florian Mayer <fmayer at google.com>
Date: Mon, 18 Nov 2024 23:18:54 -0800
Subject: [PATCH 14/22] Reland "[Sanitizers] Intercept timer_create" (#113710)
(#116717)
Original commit 2ec5c69b6872b8b474f3d37b9125d3d57d144d1b only
intercepted timer_create. Because of how versioned libc symbols work,
this could cause problems where a timer was created through a newer
`timer_create` symbol but its result was then consumed through an older
versioned symbol of a related function, causing crashes. This is why we
need to intercept all of the related functions.
Addresses https://github.com/llvm/llvm-project/issues/111847
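For context, a short C sketch of the usage pattern that makes this
necessary (not part of the patch): the timer_t produced by timer_create
flows into every related call, so all of them must bind to the same
symbol version:

  #include <time.h>

  timer_t t;
  timer_create(CLOCK_REALTIME, NULL, &t);  /* newer versioned symbol */
  struct itimerspec its = {0};
  its.it_value.tv_sec = 1;
  timer_settime(t, 0, &its, NULL);  /* must not bind to an older version */
  timer_delete(t);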
---
.../lib/hwasan/hwasan_platform_interceptors.h | 3 +
compiler-rt/lib/msan/tests/msan_test.cpp | 28 ++++++++++
.../sanitizer_common_interceptors.inc | 56 +++++++++++++++++++
.../sanitizer_platform_interceptors.h | 3 +
.../sanitizer_platform_limits_posix.h | 14 +++++
5 files changed, 104 insertions(+)
diff --git a/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h b/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h
index d92b5105219427..e8011014c2331d 100644
--- a/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h
+++ b/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h
@@ -200,6 +200,9 @@
#undef SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID
#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID 0
+#undef SANITIZER_INTERCEPT_TIMER_CREATE
+#define SANITIZER_INTERCEPT_TIMER_CREATE 0
+
#undef SANITIZER_INTERCEPT_GETITIMER
#define SANITIZER_INTERCEPT_GETITIMER 0
diff --git a/compiler-rt/lib/msan/tests/msan_test.cpp b/compiler-rt/lib/msan/tests/msan_test.cpp
index 41b99fabe84f47..a126dd4fdd55e5 100644
--- a/compiler-rt/lib/msan/tests/msan_test.cpp
+++ b/compiler-rt/lib/msan/tests/msan_test.cpp
@@ -4881,4 +4881,32 @@ TEST(MemorySanitizer, throw_catch) {
// pass
}
}
+
+#if defined(__GLIBC__)
+TEST(MemorySanitizer, timer_create) {
+ timer_t timer;
+ EXPECT_POISONED(timer);
+ int res = timer_create(CLOCK_REALTIME, nullptr, &timer);
+ ASSERT_EQ(0, res);
+ EXPECT_NOT_POISONED(timer);
+
+ // Make sure the timer is usable.
+ struct itimerspec cur_value {};
+ cur_value.it_value.tv_sec = 1;
+ EXPECT_EQ(0, timer_settime(timer, 0, &cur_value, nullptr));
+
+ struct itimerspec read_value;
+ EXPECT_POISONED(read_value);
+ EXPECT_EQ(0, timer_gettime(timer, &read_value));
+ EXPECT_NOT_POISONED(read_value);
+
+ timer_t timer2;
+ EXPECT_POISONED(timer2);
+ // Use an invalid clock_id to make timer_create fail.
+ res = timer_create(INT_MAX, nullptr, &timer2);
+ ASSERT_EQ(-1, res);
+ EXPECT_POISONED(timer2);
+ timer_delete(timer);
+}
+#endif
} // namespace
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
index b8627f8557afe2..99fa737adfaf26 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -2289,6 +2289,61 @@ INTERCEPTOR(int, pthread_getcpuclockid, uptr thread,
#define INIT_CLOCK_GETCPUCLOCKID
#endif
+#if SANITIZER_INTERCEPT_TIMER_CREATE
+INTERCEPTOR(int, timer_create, __sanitizer_clockid_t clockid, void *sevp,
+ __sanitizer_timer_t *timer) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timer_create, clockid, sevp, timer);
+ int res = REAL(timer_create)(clockid, sevp, timer);
+ if (!res && timer) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, timer, sizeof *timer);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, timer_delete, __sanitizer_timer_t timer) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timer_delete, timer);
+ int res = REAL(timer_delete)(timer);
+ return res;
+}
+
+INTERCEPTOR(int, timer_gettime, __sanitizer_timer_t timer,
+ struct __sanitizer_itimerspec *curr_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timer_gettime, timer, curr_value);
+ int res = REAL(timer_gettime)(timer, curr_value);
+ if (!res && curr_value) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, sizeof *curr_value);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, timer_settime, __sanitizer_timer_t timer, int flags,
+ const struct __sanitizer_itimerspec *new_value,
+ struct __sanitizer_itimerspec *old_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timer_settime, timer, flags, new_value,
+ old_value);
+ int res = REAL(timer_settime)(timer, flags, new_value, old_value);
+ if (!res) {
+ if (new_value)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, sizeof *new_value);
+ if (old_value)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, sizeof *old_value);
+ }
+ return res;
+}
+
+# define INIT_TIMER_CREATE \
+ COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(timer_create, "GLIBC_2.3.3"); \
+ COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(timer_delete, "GLIBC_2.3.3"); \
+ COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(timer_gettime, "GLIBC_2.3.3"); \
+ COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(timer_settime, "GLIBC_2.3.3");
+#else
+# define INIT_TIMER_CREATE
+#endif
+
#if SANITIZER_INTERCEPT_GETITIMER
INTERCEPTOR(int, getitimer, int which, void *curr_value) {
void *ctx;
@@ -10266,6 +10321,7 @@ static void InitializeCommonInterceptors() {
INIT_SETPWENT;
INIT_CLOCK_GETTIME;
INIT_CLOCK_GETCPUCLOCKID;
+ INIT_TIMER_CREATE;
INIT_GETITIMER;
INIT_TIME;
INIT_GLOB;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
index 7f9d4998bf757c..ecc768d2e543a6 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -256,6 +256,9 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment,
(SI_FREEBSD || SI_NETBSD || SI_LINUX || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID \
(SI_LINUX || SI_FREEBSD || SI_NETBSD)
+// TODO: This should be SI_POSIX, adding glibc first until I have time
+// to verify all timer_t typedefs on other platforms.
+#define SANITIZER_INTERCEPT_TIMER_CREATE SI_GLIBC
#define SANITIZER_INTERCEPT_GETITIMER SI_POSIX
#define SANITIZER_INTERCEPT_TIME SI_POSIX
#define SANITIZER_INTERCEPT_GLOB (SI_GLIBC || SI_SOLARIS)
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
index e8c81aa8e28163..7d98f8e9a9d801 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -389,6 +389,16 @@ typedef long __sanitizer_time_t;
typedef long __sanitizer_suseconds_t;
+struct __sanitizer_timespec {
+ __sanitizer_time_t tv_sec; /* seconds */
+ u64 tv_nsec; /* nanoseconds */
+};
+
+struct __sanitizer_itimerspec {
+ struct __sanitizer_timespec it_interval; /* timer period */
+ struct __sanitizer_timespec it_value; /* timer expiration */
+};
+
struct __sanitizer_timeval {
__sanitizer_time_t tv_sec;
__sanitizer_suseconds_t tv_usec;
@@ -1517,6 +1527,10 @@ extern const int si_SEGV_ACCERR;
#define SIGACTION_SYMNAME sigaction
+# if SANITIZER_LINUX
+typedef void *__sanitizer_timer_t;
+# endif
+
#endif // SANITIZER_LINUX || SANITIZER_APPLE
#endif
>From 3b162f73d8027dcd8261666a40e9bdfb40f4dacc Mon Sep 17 00:00:00 2001
From: Daniil Kovalev <dkovalev at accesssoftek.com>
Date: Tue, 19 Nov 2024 10:20:15 +0300
Subject: [PATCH 15/22] [PAC][clang] Add signed GOT cc1 flag (#96160)
Add the `-fptrauth-elf-got` clang cc1 flag, and set the `ptrauth_elf_got`
preprocessor feature and the `PointerAuthELFGOT` LangOption
correspondingly.
No additional checks, such as ensuring that the OS binary format is ELF,
are performed: that should be done at the clang driver level when a
pauth-enabled environment that implies signed GOT is requested.
If the cc1 flag is passed, the "ptrauth-elf-got" IR module flag is set.
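The new feature can then be tested in source the usual way, as the
updated preprocessor test does:

  #if __has_feature(ptrauth_elf_got)
  void has_ptrauth_elf_got() {}
  #else
  void no_ptrauth_elf_got() {}
  #endif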
---
clang/include/clang/Basic/Features.def | 1 +
clang/include/clang/Driver/Options.td | 1 +
clang/lib/CodeGen/CodeGenModule.cpp | 3 +++
clang/lib/Frontend/CompilerInvocation.cpp | 3 +++
clang/test/CodeGen/AArch64/elf-pauthabi.c | 11 +++++---
clang/test/CodeGen/ptrauth-module-flags.c | 8 ++++++
clang/test/Preprocessor/ptrauth_feature.c | 31 +++++++++++++++--------
7 files changed, 45 insertions(+), 13 deletions(-)
create mode 100644 clang/test/CodeGen/ptrauth-module-flags.c
diff --git a/clang/include/clang/Basic/Features.def b/clang/include/clang/Basic/Features.def
index 7f5d26118bdc71..9088c867d53ce4 100644
--- a/clang/include/clang/Basic/Features.def
+++ b/clang/include/clang/Basic/Features.def
@@ -116,6 +116,7 @@ FEATURE(ptrauth_function_pointer_type_discrimination, LangOpts.PointerAuthFuncti
FEATURE(ptrauth_indirect_gotos, LangOpts.PointerAuthIndirectGotos)
FEATURE(ptrauth_init_fini, LangOpts.PointerAuthInitFini)
FEATURE(ptrauth_init_fini_address_discrimination, LangOpts.PointerAuthInitFiniAddressDiscrimination)
+FEATURE(ptrauth_elf_got, LangOpts.PointerAuthELFGOT)
EXTENSION(swiftcc,
PP.getTargetInfo().checkCallingConvention(CC_Swift) ==
clang::TargetInfo::CCCR_OK)
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index d7230dd7272fd6..f2f9c20c9bc264 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -4356,6 +4356,7 @@ defm ptrauth_indirect_gotos : OptInCC1FFlag<"ptrauth-indirect-gotos",
defm ptrauth_init_fini : OptInCC1FFlag<"ptrauth-init-fini", "Enable signing of function pointers in init/fini arrays">;
defm ptrauth_init_fini_address_discrimination : OptInCC1FFlag<"ptrauth-init-fini-address-discrimination",
"Enable address discrimination of function pointers in init/fini arrays">;
+defm ptrauth_elf_got : OptInCC1FFlag<"ptrauth-elf-got", "Enable authentication of pointers from GOT (ELF only)">;
}
def fenable_matrix : Flag<["-"], "fenable-matrix">, Group<f_Group>,
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 508f53482d4e1f..4f456981cf0de8 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -1213,6 +1213,9 @@ void CodeGenModule::Release() {
getModule().addModuleFlag(llvm::Module::Min,
"sign-return-address-with-bkey", 1);
+ if (LangOpts.PointerAuthELFGOT)
+ getModule().addModuleFlag(llvm::Module::Min, "ptrauth-elf-got", 1);
+
if (getTriple().isOSLinux()) {
assert(getTriple().isOSBinFormatELF());
using namespace llvm::ELF;
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index e3145dcacf58d1..3dd94c31b2bc7a 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -3452,6 +3452,8 @@ static void GeneratePointerAuthArgs(const LangOptions &Opts,
GenerateArg(Consumer, OPT_fptrauth_init_fini);
if (Opts.PointerAuthInitFiniAddressDiscrimination)
GenerateArg(Consumer, OPT_fptrauth_init_fini_address_discrimination);
+ if (Opts.PointerAuthELFGOT)
+ GenerateArg(Consumer, OPT_fptrauth_elf_got);
}
static void ParsePointerAuthArgs(LangOptions &Opts, ArgList &Args,
@@ -3472,6 +3474,7 @@ static void ParsePointerAuthArgs(LangOptions &Opts, ArgList &Args,
Opts.PointerAuthInitFini = Args.hasArg(OPT_fptrauth_init_fini);
Opts.PointerAuthInitFiniAddressDiscrimination =
Args.hasArg(OPT_fptrauth_init_fini_address_discrimination);
+ Opts.PointerAuthELFGOT = Args.hasArg(OPT_fptrauth_elf_got);
}
/// Check if input file kind and language standard are compatible.
diff --git a/clang/test/CodeGen/AArch64/elf-pauthabi.c b/clang/test/CodeGen/AArch64/elf-pauthabi.c
index 023fa8c18e1300..b176f708db85b4 100644
--- a/clang/test/CodeGen/AArch64/elf-pauthabi.c
+++ b/clang/test/CodeGen/AArch64/elf-pauthabi.c
@@ -1,5 +1,3 @@
-//// TODO: also test with -fptrauth-elf-got when the driver flag is supported
-
// RUN: %clang_cc1 -triple aarch64-linux -emit-llvm -o - \
// RUN: -fptrauth-intrinsics \
// RUN: -fptrauth-calls \
@@ -9,6 +7,7 @@
// RUN: -fptrauth-vtable-pointer-type-discrimination \
// RUN: -fptrauth-init-fini \
// RUN: -fptrauth-init-fini-address-discrimination \
+// RUN: -fptrauth-elf-got \
// RUN: -fptrauth-indirect-gotos \
// RUN: -fptrauth-type-info-vtable-pointer-discrimination \
// RUN: -fptrauth-function-pointer-type-discrimination %s | \
@@ -42,6 +41,9 @@
// RUN: -fptrauth-calls -fptrauth-init-fini -fptrauth-init-fini-address-discrimination %s | \
// RUN: FileCheck %s --check-prefix=INITFINIADDR
+// RUN: %clang_cc1 -triple aarch64-linux -emit-llvm -o - \
+// RUN: -fptrauth-elf-got %s | FileCheck %s --check-prefix=ELFGOT
+
// RUN: %clang_cc1 -triple aarch64-linux -emit-llvm -o - \
// RUN: -fptrauth-indirect-gotos %s | FileCheck %s --check-prefix=GOTOS
@@ -54,7 +56,7 @@
// RUN: FileCheck %s --check-prefix=FPTRTYPE
// ALL: !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
-// ALL: !{i32 1, !"aarch64-elf-pauthabi-version", i32 3839}
+// ALL: !{i32 1, !"aarch64-elf-pauthabi-version", i32 4095}
// INTRIN: !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
// INTRIN: !{i32 1, !"aarch64-elf-pauthabi-version", i32 1}
@@ -80,6 +82,9 @@
// INITFINIADDR: !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
// INITFINIADDR: !{i32 1, !"aarch64-elf-pauthabi-version", i32 194}
+// ELFGOT: !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
+// ELFGOT: !{i32 1, !"aarch64-elf-pauthabi-version", i32 256}
+
// GOTOS: !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
// GOTOS: !{i32 1, !"aarch64-elf-pauthabi-version", i32 512}
diff --git a/clang/test/CodeGen/ptrauth-module-flags.c b/clang/test/CodeGen/ptrauth-module-flags.c
new file mode 100644
index 00000000000000..5a7e9a7c2a36fe
--- /dev/null
+++ b/clang/test/CodeGen/ptrauth-module-flags.c
@@ -0,0 +1,8 @@
+// RUN: %clang_cc1 -triple aarch64-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=OFF
+// RUN: %clang_cc1 -triple aarch64-linux-gnu -fptrauth-elf-got -emit-llvm %s -o - | FileCheck %s --check-prefix=ELFGOT
+
+// ELFGOT: !llvm.module.flags = !{
+// ELFGOT-SAME: !1
+// ELFGOT: !1 = !{i32 8, !"ptrauth-elf-got", i32 1}
+
+// OFF-NOT: "ptrauth-
diff --git a/clang/test/Preprocessor/ptrauth_feature.c b/clang/test/Preprocessor/ptrauth_feature.c
index 2a3edc23f47532..a440791d6cc695 100644
--- a/clang/test/Preprocessor/ptrauth_feature.c
+++ b/clang/test/Preprocessor/ptrauth_feature.c
@@ -2,34 +2,37 @@
//// For example, -fptrauth-init-fini will not affect codegen without -fptrauth-calls, but the preprocessor feature would be set anyway.
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-intrinsics | \
-// RUN: FileCheck %s --check-prefixes=INTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=INTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS,NOELFGOT
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-calls | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,CALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,CALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS,NOELFGOT
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-returns | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,RETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,RETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS,NOELFGOT
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-vtable-pointer-address-discrimination | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,VPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,VPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS,NOELFGOT
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-vtable-pointer-type-discrimination | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,VPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,VPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS,NOELFGOT
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-type-info-vtable-pointer-discrimination | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,TYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,TYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS,NOELFGOT
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-function-pointer-type-discrimination | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,FUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,FUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS,NOELFGOT
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-init-fini | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,INITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,INITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS,NOELFGOT
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-init-fini-address-discrimination | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,INITFINI_ADDR_DISCR,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,INITFINI_ADDR_DISCR,NOGOTOS,NOELFGOT
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-indirect-gotos | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,GOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,GOTOS,NOELFGOT
+
+// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-elf-got | \
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS,ELFGOT
#if __has_feature(ptrauth_intrinsics)
// INTRIN: has_ptrauth_intrinsics
@@ -119,3 +122,11 @@ void has_ptrauth_indirect_gotos() {}
// NOGOTOS: no_ptrauth_indirect_gotos
void no_ptrauth_indirect_gotos() {}
#endif
+
+#if __has_feature(ptrauth_elf_got)
+// ELFGOT: has_ptrauth_elf_got
+void has_ptrauth_elf_got() {}
+#else
+// NOELFGOT: no_ptrauth_elf_got
+void no_ptrauth_elf_got() {}
+#endif
>From 456e60904b7b9de0a2bfabdac37ce9b8ac054750 Mon Sep 17 00:00:00 2001
From: Matthias Springer <me at m-sp.org>
Date: Tue, 19 Nov 2024 16:31:43 +0900
Subject: [PATCH 16/22] [mlir][Transforms][NFC] Dialect Conversion: Delete dead
code from `ConversionValueMapping` (#116758)
---
mlir/lib/Transforms/Utils/DialectConversion.cpp | 13 -------------
1 file changed, 13 deletions(-)
diff --git a/mlir/lib/Transforms/Utils/DialectConversion.cpp b/mlir/lib/Transforms/Utils/DialectConversion.cpp
index 5b2cfd370900a8..42fe5b925654a1 100644
--- a/mlir/lib/Transforms/Utils/DialectConversion.cpp
+++ b/mlir/lib/Transforms/Utils/DialectConversion.cpp
@@ -101,11 +101,6 @@ struct ConversionValueMapping {
mapping.map(oldVal, newVal);
}
- /// Try to map a value to the one provided. Returns false if a transitive
- /// mapping from the new value to the old value already exists, true if the
- /// map was updated.
- bool tryMap(Value oldVal, Value newVal);
-
/// Drop the last mapping for the given value.
void erase(Value value) { mapping.erase(value); }
@@ -149,14 +144,6 @@ Value ConversionValueMapping::lookupOrNull(Value from, Type desiredType) const {
return result;
}
-bool ConversionValueMapping::tryMap(Value oldVal, Value newVal) {
- for (Value it = newVal; it; it = mapping.lookupOrNull(it))
- if (it == oldVal)
- return false;
- map(oldVal, newVal);
- return true;
-}
-
//===----------------------------------------------------------------------===//
// Rewriter and Translation State
//===----------------------------------------------------------------------===//
>From 55068dc3b7725f24de82dd4510162865c91a4f5e Mon Sep 17 00:00:00 2001
From: Pavel Labath <pavel at labath.sk>
Date: Tue, 19 Nov 2024 08:37:51 +0100
Subject: [PATCH 17/22] [lldb] Add timed callbacks to the MainLoop class
(#112895)
The motivating use case is being able to "time out" certain operations
(by adding a timed callback which will force the termination of the
loop), but the design is flexible enough to accomodate other use cases
as well (e.g. running a periodic task in the background).
The implementation builds on the existing "pending callback" mechanism,
by associating a time point with each callback -- every time the loop
wakes up, it runs all of the callbacks whose time point has passed, and
it also makes sure to sleep only until the next callback is scheduled to
run.
I've done some renaming as names like "TriggerPendingCallbacks" were no
longer accurate -- the function may no longer cause any callbacks to be
called (it may just cause the main loop thread to recalculate the time
it wants to sleep).
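For example (following the added unit tests), timing out a Run() call
now takes a single registration instead of a helper thread:

  MainLoop loop;
  // ... register read objects / signals ...
  loop.AddCallback([](MainLoopBase &loop) { loop.RequestTermination(); },
                   std::chrono::seconds(5));
  Status status = loop.Run();  // wakes up and terminates after ~5 seconds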
---
lldb/include/lldb/Host/MainLoopBase.h | 32 ++++++--
lldb/include/lldb/Host/posix/MainLoopPosix.h | 6 +-
.../lldb/Host/windows/MainLoopWindows.h | 4 +-
lldb/source/Host/common/MainLoopBase.cpp | 40 +++++++---
lldb/source/Host/posix/MainLoopPosix.cpp | 79 ++++++++++++-------
lldb/source/Host/windows/MainLoopWindows.cpp | 37 ++++++---
lldb/unittests/Host/MainLoopTest.cpp | 64 +++++++++++++--
7 files changed, 194 insertions(+), 68 deletions(-)
diff --git a/lldb/include/lldb/Host/MainLoopBase.h b/lldb/include/lldb/Host/MainLoopBase.h
index 7365ee7a65ee64..be9a2676e7443e 100644
--- a/lldb/include/lldb/Host/MainLoopBase.h
+++ b/lldb/include/lldb/Host/MainLoopBase.h
@@ -13,8 +13,10 @@
#include "lldb/Utility/Status.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/ErrorHandling.h"
+#include <chrono>
#include <functional>
#include <mutex>
+#include <queue>
namespace lldb_private {
@@ -38,6 +40,9 @@ class MainLoopBase {
class ReadHandle;
public:
+ using TimePoint = std::chrono::time_point<std::chrono::steady_clock,
+ std::chrono::nanoseconds>;
+
MainLoopBase() : m_terminate_request(false) {}
virtual ~MainLoopBase() = default;
@@ -52,7 +57,18 @@ class MainLoopBase {
// Add a pending callback that will be executed once after all the pending
// events are processed. The callback will be executed even if termination
// was requested.
- void AddPendingCallback(const Callback &callback);
+ void AddPendingCallback(const Callback &callback) {
+ AddCallback(callback, std::chrono::steady_clock::time_point());
+ }
+
+ // Add a callback that will be executed after a certain amount of time has
+ // passed.
+ void AddCallback(const Callback &callback, std::chrono::nanoseconds delay) {
+ AddCallback(callback, std::chrono::steady_clock::now() + delay);
+ }
+
+ // Add a callback that will be executed after a given point in time.
+ void AddCallback(const Callback &callback, TimePoint point);
// Waits for registered events and invoke the proper callbacks. Returns when
// all callbacks deregister themselves or when someone requests termination.
@@ -69,14 +85,18 @@ class MainLoopBase {
virtual void UnregisterReadObject(IOObject::WaitableHandle handle) = 0;
- // Interrupt the loop that is currently waiting for events and execute
- // the current pending callbacks immediately.
- virtual void TriggerPendingCallbacks() = 0;
+ // Interrupt the loop that is currently waiting for events.
+ virtual void Interrupt() = 0;
+
+ void ProcessCallbacks();
- void ProcessPendingCallbacks();
+ std::optional<TimePoint> GetNextWakeupTime();
std::mutex m_callback_mutex;
- std::vector<Callback> m_pending_callbacks;
+ std::priority_queue<std::pair<TimePoint, Callback>,
+ std::vector<std::pair<TimePoint, Callback>>,
+ llvm::on_first<std::greater<TimePoint>>>
+ m_callbacks;
bool m_terminate_request : 1;
private:
diff --git a/lldb/include/lldb/Host/posix/MainLoopPosix.h b/lldb/include/lldb/Host/posix/MainLoopPosix.h
index 1988dde7c65aee..e9ac798b948df9 100644
--- a/lldb/include/lldb/Host/posix/MainLoopPosix.h
+++ b/lldb/include/lldb/Host/posix/MainLoopPosix.h
@@ -54,7 +54,7 @@ class MainLoopPosix : public MainLoopBase {
void UnregisterReadObject(IOObject::WaitableHandle handle) override;
void UnregisterSignal(int signo, std::list<Callback>::iterator callback_it);
- void TriggerPendingCallbacks() override;
+ void Interrupt() override;
private:
void ProcessReadObject(IOObject::WaitableHandle handle);
@@ -88,8 +88,8 @@ class MainLoopPosix : public MainLoopBase {
llvm::DenseMap<IOObject::WaitableHandle, Callback> m_read_fds;
llvm::DenseMap<int, SignalInfo> m_signals;
- Pipe m_trigger_pipe;
- std::atomic<bool> m_triggering;
+ Pipe m_interrupt_pipe;
+ std::atomic<bool> m_interrupting = false;
#if HAVE_SYS_EVENT_H
int m_kqueue;
#endif
diff --git a/lldb/include/lldb/Host/windows/MainLoopWindows.h b/lldb/include/lldb/Host/windows/MainLoopWindows.h
index 33e179e6c1286c..3937a24645d955 100644
--- a/lldb/include/lldb/Host/windows/MainLoopWindows.h
+++ b/lldb/include/lldb/Host/windows/MainLoopWindows.h
@@ -34,7 +34,7 @@ class MainLoopWindows : public MainLoopBase {
protected:
void UnregisterReadObject(IOObject::WaitableHandle handle) override;
- void TriggerPendingCallbacks() override;
+ void Interrupt() override;
private:
void ProcessReadObject(IOObject::WaitableHandle handle);
@@ -45,7 +45,7 @@ class MainLoopWindows : public MainLoopBase {
Callback callback;
};
llvm::DenseMap<IOObject::WaitableHandle, FdInfo> m_read_fds;
- void *m_trigger_event;
+ void *m_interrupt_event;
};
} // namespace lldb_private
diff --git a/lldb/source/Host/common/MainLoopBase.cpp b/lldb/source/Host/common/MainLoopBase.cpp
index 030a4f0371681e..64a57e65849e99 100644
--- a/lldb/source/Host/common/MainLoopBase.cpp
+++ b/lldb/source/Host/common/MainLoopBase.cpp
@@ -7,27 +7,43 @@
//===----------------------------------------------------------------------===//
#include "lldb/Host/MainLoopBase.h"
+#include <chrono>
using namespace lldb;
using namespace lldb_private;
-void MainLoopBase::AddPendingCallback(const Callback &callback) {
+void MainLoopBase::AddCallback(const Callback &callback, TimePoint point) {
+ bool interrupt_needed;
{
std::lock_guard<std::mutex> lock{m_callback_mutex};
- m_pending_callbacks.push_back(callback);
+ // We need to interrupt the main thread if this callback is scheduled to
+ // execute at an earlier time than the earliest callback registered so far.
+ interrupt_needed = m_callbacks.empty() || point < m_callbacks.top().first;
+ m_callbacks.emplace(point, callback);
}
- TriggerPendingCallbacks();
+ if (interrupt_needed)
+ Interrupt();
}
-void MainLoopBase::ProcessPendingCallbacks() {
- // Move the callbacks to a local vector to avoid keeping m_pending_callbacks
- // locked throughout the calls.
- std::vector<Callback> pending_callbacks;
- {
- std::lock_guard<std::mutex> lock{m_callback_mutex};
- pending_callbacks = std::move(m_pending_callbacks);
- }
+void MainLoopBase::ProcessCallbacks() {
+ while (true) {
+ Callback callback;
+ {
+ std::lock_guard<std::mutex> lock{m_callback_mutex};
+ if (m_callbacks.empty() ||
+ std::chrono::steady_clock::now() < m_callbacks.top().first)
+ return;
+ callback = std::move(m_callbacks.top().second);
+ m_callbacks.pop();
+ }
- for (const Callback &callback : pending_callbacks)
callback(*this);
+ }
+}
+
+std::optional<MainLoopBase::TimePoint> MainLoopBase::GetNextWakeupTime() {
+ std::lock_guard<std::mutex> lock(m_callback_mutex);
+ if (m_callbacks.empty())
+ return std::nullopt;
+ return m_callbacks.top().first;
}
diff --git a/lldb/source/Host/posix/MainLoopPosix.cpp b/lldb/source/Host/posix/MainLoopPosix.cpp
index 46993aea9cb103..1715610e0f84f1 100644
--- a/lldb/source/Host/posix/MainLoopPosix.cpp
+++ b/lldb/source/Host/posix/MainLoopPosix.cpp
@@ -15,6 +15,7 @@
#include <algorithm>
#include <cassert>
#include <cerrno>
+#include <chrono>
#include <csignal>
#include <ctime>
#include <fcntl.h>
@@ -68,6 +69,30 @@ static void SignalHandler(int signo, siginfo_t *info, void *) {
(void)bytes_written;
}
+class ToTimeSpec {
+public:
+ explicit ToTimeSpec(std::optional<MainLoopPosix::TimePoint> point) {
+ using namespace std::chrono;
+
+ if (!point) {
+ m_ts_ptr = nullptr;
+ return;
+ }
+ nanoseconds dur = std::max(*point - steady_clock::now(), nanoseconds(0));
+ m_ts_ptr = &m_ts;
+ m_ts.tv_sec = duration_cast<seconds>(dur).count();
+ m_ts.tv_nsec = (dur % seconds(1)).count();
+ }
+ ToTimeSpec(const ToTimeSpec &) = delete;
+ ToTimeSpec &operator=(const ToTimeSpec &) = delete;
+
+ operator struct timespec *() { return m_ts_ptr; }
+
+private:
+ struct timespec m_ts;
+ struct timespec *m_ts_ptr;
+};
+
class MainLoopPosix::RunImpl {
public:
RunImpl(MainLoopPosix &loop);
@@ -100,8 +125,9 @@ Status MainLoopPosix::RunImpl::Poll() {
for (auto &fd : loop.m_read_fds)
EV_SET(&in_events[i++], fd.first, EVFILT_READ, EV_ADD, 0, 0, 0);
- num_events = kevent(loop.m_kqueue, in_events.data(), in_events.size(),
- out_events, std::size(out_events), nullptr);
+ num_events =
+ kevent(loop.m_kqueue, in_events.data(), in_events.size(), out_events,
+ std::size(out_events), ToTimeSpec(loop.GetNextWakeupTime()));
if (num_events < 0) {
if (errno == EINTR) {
@@ -145,7 +171,7 @@ Status MainLoopPosix::RunImpl::Poll() {
}
if (ppoll(read_fds.data(), read_fds.size(),
- /*timeout=*/nullptr,
+ ToTimeSpec(loop.GetNextWakeupTime()),
/*sigmask=*/nullptr) == -1 &&
errno != EINTR)
return Status(errno, eErrorTypePOSIX);
@@ -166,27 +192,28 @@ void MainLoopPosix::RunImpl::ProcessReadEvents() {
}
#endif
-MainLoopPosix::MainLoopPosix() : m_triggering(false) {
- Status error = m_trigger_pipe.CreateNew(/*child_process_inherit=*/false);
+MainLoopPosix::MainLoopPosix() {
+ Status error = m_interrupt_pipe.CreateNew(/*child_process_inherit=*/false);
assert(error.Success());
// Make the write end of the pipe non-blocking.
- int result = fcntl(m_trigger_pipe.GetWriteFileDescriptor(), F_SETFL,
- fcntl(m_trigger_pipe.GetWriteFileDescriptor(), F_GETFL) |
+ int result = fcntl(m_interrupt_pipe.GetWriteFileDescriptor(), F_SETFL,
+ fcntl(m_interrupt_pipe.GetWriteFileDescriptor(), F_GETFL) |
O_NONBLOCK);
assert(result == 0);
UNUSED_IF_ASSERT_DISABLED(result);
- const int trigger_pipe_fd = m_trigger_pipe.GetReadFileDescriptor();
- m_read_fds.insert({trigger_pipe_fd, [trigger_pipe_fd](MainLoopBase &loop) {
- char c;
- ssize_t bytes_read = llvm::sys::RetryAfterSignal(
- -1, ::read, trigger_pipe_fd, &c, 1);
- assert(bytes_read == 1);
- UNUSED_IF_ASSERT_DISABLED(bytes_read);
- // NB: This implicitly causes another loop iteration
- // and therefore the execution of pending callbacks.
- }});
+ const int interrupt_pipe_fd = m_interrupt_pipe.GetReadFileDescriptor();
+ m_read_fds.insert(
+ {interrupt_pipe_fd, [interrupt_pipe_fd](MainLoopBase &loop) {
+ char c;
+ ssize_t bytes_read =
+ llvm::sys::RetryAfterSignal(-1, ::read, interrupt_pipe_fd, &c, 1);
+ assert(bytes_read == 1);
+ UNUSED_IF_ASSERT_DISABLED(bytes_read);
+ // NB: This implicitly causes another loop iteration
+ // and therefore the execution of pending callbacks.
+ }});
#if HAVE_SYS_EVENT_H
m_kqueue = kqueue();
assert(m_kqueue >= 0);
@@ -197,8 +224,8 @@ MainLoopPosix::~MainLoopPosix() {
#if HAVE_SYS_EVENT_H
close(m_kqueue);
#endif
- m_read_fds.erase(m_trigger_pipe.GetReadFileDescriptor());
- m_trigger_pipe.Close();
+ m_read_fds.erase(m_interrupt_pipe.GetReadFileDescriptor());
+ m_interrupt_pipe.Close();
assert(m_read_fds.size() == 0);
assert(m_signals.size() == 0);
}
@@ -245,11 +272,9 @@ MainLoopPosix::RegisterSignal(int signo, const Callback &callback,
sigset_t old_set;
// Set signal info before installing the signal handler!
- g_signal_info[signo].pipe_fd = m_trigger_pipe.GetWriteFileDescriptor();
+ g_signal_info[signo].pipe_fd = m_interrupt_pipe.GetWriteFileDescriptor();
g_signal_info[signo].flag = 0;
- // Even if using kqueue, the signal handler will still be invoked, so it's
- // important to replace it with our "benign" handler.
int ret = sigaction(signo, &new_action, &info.old_action);
UNUSED_IF_ASSERT_DISABLED(ret);
assert(ret == 0 && "sigaction failed");
@@ -308,8 +333,8 @@ Status MainLoopPosix::Run() {
ProcessSignals();
- m_triggering = false;
- ProcessPendingCallbacks();
+ m_interrupting = false;
+ ProcessCallbacks();
}
return Status();
}
@@ -347,13 +372,13 @@ void MainLoopPosix::ProcessSignal(int signo) {
}
}
-void MainLoopPosix::TriggerPendingCallbacks() {
- if (m_triggering.exchange(true))
+void MainLoopPosix::Interrupt() {
+ if (m_interrupting.exchange(true))
return;
char c = '.';
size_t bytes_written;
- Status error = m_trigger_pipe.Write(&c, 1, bytes_written);
+ Status error = m_interrupt_pipe.Write(&c, 1, bytes_written);
assert(error.Success());
UNUSED_IF_ASSERT_DISABLED(error);
assert(bytes_written == 1);
diff --git a/lldb/source/Host/windows/MainLoopWindows.cpp b/lldb/source/Host/windows/MainLoopWindows.cpp
index c9aa6d339d8f48..0a5a35e9db9dde 100644
--- a/lldb/source/Host/windows/MainLoopWindows.cpp
+++ b/lldb/source/Host/windows/MainLoopWindows.cpp
@@ -21,14 +21,24 @@
using namespace lldb;
using namespace lldb_private;
+static DWORD ToTimeout(std::optional<MainLoopWindows::TimePoint> point) {
+ using namespace std::chrono;
+
+ if (!point)
+ return WSA_INFINITE;
+
+ nanoseconds dur = (std::max)(*point - steady_clock::now(), nanoseconds(0));
+ return duration_cast<milliseconds>(dur).count();
+}
+
MainLoopWindows::MainLoopWindows() {
- m_trigger_event = WSACreateEvent();
- assert(m_trigger_event != WSA_INVALID_EVENT);
+ m_interrupt_event = WSACreateEvent();
+ assert(m_interrupt_event != WSA_INVALID_EVENT);
}
MainLoopWindows::~MainLoopWindows() {
assert(m_read_fds.empty());
- BOOL result = WSACloseEvent(m_trigger_event);
+ BOOL result = WSACloseEvent(m_interrupt_event);
assert(result == TRUE);
UNUSED_IF_ASSERT_DISABLED(result);
}
@@ -43,10 +53,11 @@ llvm::Expected<size_t> MainLoopWindows::Poll() {
events.push_back(info.event);
}
- events.push_back(m_trigger_event);
+ events.push_back(m_interrupt_event);
- DWORD result = WSAWaitForMultipleEvents(events.size(), events.data(), FALSE,
- WSA_INFINITE, FALSE);
+ DWORD result =
+ WSAWaitForMultipleEvents(events.size(), events.data(), FALSE,
+ ToTimeout(GetNextWakeupTime()), FALSE);
for (auto &fd : m_read_fds) {
int result = WSAEventSelect(fd.first, WSA_INVALID_EVENT, 0);
@@ -54,9 +65,13 @@ llvm::Expected<size_t> MainLoopWindows::Poll() {
UNUSED_IF_ASSERT_DISABLED(result);
}
- if (result >= WSA_WAIT_EVENT_0 && result <= WSA_WAIT_EVENT_0 + events.size())
+ if (result >= WSA_WAIT_EVENT_0 && result < WSA_WAIT_EVENT_0 + events.size())
return result - WSA_WAIT_EVENT_0;
+  // A timeout is treated as (premature) signaling of the interrupt event.
+ if (result == WSA_WAIT_TIMEOUT)
+ return events.size() - 1;
+
return llvm::createStringError(llvm::inconvertibleErrorCode(),
"WSAWaitForMultipleEvents failed");
}
@@ -127,13 +142,11 @@ Status MainLoopWindows::Run() {
ProcessReadObject(KV.first);
} else {
assert(*signaled_event == m_read_fds.size());
- WSAResetEvent(m_trigger_event);
+ WSAResetEvent(m_interrupt_event);
}
- ProcessPendingCallbacks();
+ ProcessCallbacks();
}
return Status();
}
-void MainLoopWindows::TriggerPendingCallbacks() {
- WSASetEvent(m_trigger_event);
-}
+void MainLoopWindows::Interrupt() { WSASetEvent(m_interrupt_event); }
diff --git a/lldb/unittests/Host/MainLoopTest.cpp b/lldb/unittests/Host/MainLoopTest.cpp
index 622a547fa22f04..e7425b737a6dab 100644
--- a/lldb/unittests/Host/MainLoopTest.cpp
+++ b/lldb/unittests/Host/MainLoopTest.cpp
@@ -15,6 +15,7 @@
#include "llvm/Config/llvm-config.h" // for LLVM_ON_UNIX
#include "llvm/Testing/Support/Error.h"
#include "gtest/gtest.h"
+#include <chrono>
#include <future>
#include <thread>
@@ -106,13 +107,9 @@ TEST_F(MainLoopTest, NoSpuriousReads) {
error);
ASSERT_THAT_ERROR(error.ToError(), llvm::Succeeded());
// Terminate the loop after one second.
- std::thread terminate_thread([&loop] {
- std::this_thread::sleep_for(std::chrono::seconds(1));
- loop.AddPendingCallback(
- [](MainLoopBase &loop) { loop.RequestTermination(); });
- });
+ loop.AddCallback([](MainLoopBase &loop) { loop.RequestTermination(); },
+ std::chrono::seconds(1));
ASSERT_THAT_ERROR(loop.Run().ToError(), llvm::Succeeded());
- terminate_thread.join();
// Make sure the callback was called only once.
ASSERT_EQ(1u, callback_count);
@@ -223,6 +220,61 @@ TEST_F(MainLoopTest, ManyPendingCallbacks) {
ASSERT_TRUE(loop.Run().Success());
}
+TEST_F(MainLoopTest, CallbackWithTimeout) {
+ MainLoop loop;
+ loop.AddCallback([](MainLoopBase &loop) { loop.RequestTermination(); },
+ std::chrono::seconds(2));
+ auto start = std::chrono::steady_clock::now();
+ ASSERT_THAT_ERROR(loop.Run().takeError(), llvm::Succeeded());
+ EXPECT_GE(std::chrono::steady_clock::now() - start, std::chrono::seconds(2));
+}
+
+TEST_F(MainLoopTest, TimedCallbacksRunInOrder) {
+ MainLoop loop;
+ auto start = std::chrono::steady_clock::now();
+ std::chrono::milliseconds epsilon(10);
+ std::vector<int> order;
+ auto add_cb = [&](int id) {
+ loop.AddCallback([&order, id](MainLoopBase &) { order.push_back(id); },
+ start + id * epsilon);
+ };
+ add_cb(3);
+ add_cb(2);
+ add_cb(4);
+ add_cb(1);
+ loop.AddCallback([](MainLoopBase &loop) { loop.RequestTermination(); },
+ start + 5 * epsilon);
+ ASSERT_THAT_ERROR(loop.Run().takeError(), llvm::Succeeded());
+ EXPECT_GE(std::chrono::steady_clock::now() - start, 5 * epsilon);
+ ASSERT_THAT(order, testing::ElementsAre(1, 2, 3, 4));
+}
+
+TEST_F(MainLoopTest, TimedCallbackShortensSleep) {
+ MainLoop loop;
+ auto start = std::chrono::steady_clock::now();
+ bool long_callback_called = false;
+ loop.AddCallback(
+ [&](MainLoopBase &loop) {
+ long_callback_called = true;
+ loop.RequestTermination();
+ },
+ std::chrono::seconds(30));
+ std::future<Status> async_run =
+ std::async(std::launch::async, &MainLoop::Run, std::ref(loop));
+ std::this_thread::sleep_for(std::chrono::milliseconds(100));
+ bool short_callback_called = false;
+ loop.AddCallback(
+ [&](MainLoopBase &loop) {
+ short_callback_called = true;
+ loop.RequestTermination();
+ },
+ std::chrono::seconds(1));
+ ASSERT_THAT_ERROR(async_run.get().takeError(), llvm::Succeeded());
+ EXPECT_LT(std::chrono::steady_clock::now() - start, std::chrono::seconds(10));
+ EXPECT_TRUE(short_callback_called);
+ EXPECT_FALSE(long_callback_called);
+}
+
#ifdef LLVM_ON_UNIX
TEST_F(MainLoopTest, DetectsEOF) {
>From 980d5fb2d014f87fa3f6249e9196bb28d7afd9c6 Mon Sep 17 00:00:00 2001
From: "Ivan R. Ivanov" <ivanov.i.aa at m.titech.ac.jp>
Date: Tue, 19 Nov 2024 16:43:05 +0900
Subject: [PATCH 18/22] [MLIR][omp] Add omp operations for OpenMP workshare
(#101443)
Add the `omp.workshare` and `omp.workshare.loop_wrapper` operations used for the implementation of the `workshare` construct in flang.
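A hedged C++ sketch of how a lowering could create the new ops with the
added builders (assumes an OpBuilder `b` at a valid insertion point and
the usual mlir::omp includes; construction of the wrapped loop nest is
elided):

  omp::WorkshareOperands clauses;  // nowait left unset in this sketch
  auto ws = b.create<omp::WorkshareOp>(loc, clauses);
  b.createBlock(&ws.getRegion());
  auto wrapper = b.create<omp::WorkshareLoopWrapperOp>(loc);
  // ... build the omp.loop_nest inside wrapper's single region ...
  b.create<omp::TerminatorOp>(loc);  // terminate the workshare region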
---
mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td | 43 ++++++++++++
mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp | 21 ++++++
mlir/test/Dialect/OpenMP/invalid.mlir | 38 +++++++++++
mlir/test/Dialect/OpenMP/ops.mlir | 67 +++++++++++++++++++
4 files changed, 169 insertions(+)
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
index f79a3eb88e4b5e..156e6eb371b85d 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
@@ -313,6 +313,49 @@ def SingleOp : OpenMP_Op<"single", traits = [
let hasVerifier = 1;
}
+//===----------------------------------------------------------------------===//
+// 2.8.3 Workshare Construct
+//===----------------------------------------------------------------------===//
+
+def WorkshareOp : OpenMP_Op<"workshare", traits = [
+ RecursiveMemoryEffects,
+ ], clauses = [
+ OpenMP_NowaitClause,
+ ], singleRegion = true> {
+ let summary = "workshare directive";
+ let description = [{
+ The workshare construct divides the execution of the enclosed structured
+ block into separate units of work, and causes the threads of the team to
+ share the work such that each unit is executed only once by one thread, in
+ the context of its implicit task.
+
+ This operation is used for the intermediate representation of the workshare
+ block before the work gets divided between the threads. See the flang
+ LowerWorkshare pass for details.
+ }] # clausesDescription;
+
+ let builders = [
+ OpBuilder<(ins CArg<"const WorkshareOperands &">:$clauses)>
+ ];
+}
+
+def WorkshareLoopWrapperOp : OpenMP_Op<"workshare.loop_wrapper", traits = [
+ DeclareOpInterfaceMethods<LoopWrapperInterface>, NoTerminator,
+ RecursiveMemoryEffects, SingleBlock
+ ], singleRegion = true> {
+ let summary = "contains loop nests to be parallelized by workshare";
+ let description = [{
+ This operation wraps a loop nest that is marked for dividing into units of
+ work by an encompassing omp.workshare operation.
+ }];
+
+ let builders = [
+ OpBuilder<(ins), [{ build($_builder, $_state, {}); }]>
+ ];
+ let assemblyFormat = "$region attr-dict";
+ let hasVerifier = 1;
+}
+
//===----------------------------------------------------------------------===//
// Loop Nest
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index 19e0fa30a75715..94e71e089d4b18 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -1897,6 +1897,27 @@ LogicalResult SingleOp::verify() {
getCopyprivateSyms());
}
+//===----------------------------------------------------------------------===//
+// WorkshareOp
+//===----------------------------------------------------------------------===//
+
+void WorkshareOp::build(OpBuilder &builder, OperationState &state,
+ const WorkshareOperands &clauses) {
+ WorkshareOp::build(builder, state, clauses.nowait);
+}
+
+//===----------------------------------------------------------------------===//
+// WorkshareLoopWrapperOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult WorkshareLoopWrapperOp::verify() {
+ if (!(*this)->getParentOfType<WorkshareOp>())
+ return emitError() << "must be nested in an omp.workshare";
+ if (getNestedWrapper())
+ return emitError() << "cannot be composite";
+ return success();
+}
+
//===----------------------------------------------------------------------===//
// LoopWrapperInterface
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Dialect/OpenMP/invalid.mlir b/mlir/test/Dialect/OpenMP/invalid.mlir
index aa41eea44f3ef4..2a19e4837f5504 100644
--- a/mlir/test/Dialect/OpenMP/invalid.mlir
+++ b/mlir/test/Dialect/OpenMP/invalid.mlir
@@ -2620,6 +2620,44 @@ func.func @omp_loop_invalid_binding(%lb : index, %ub : index, %step : index) {
omp.yield
}
}
+ return
+}
+
+// -----
+func.func @nested_wrapper(%idx : index) {
+ omp.workshare {
+ // expected-error @below {{cannot be composite}}
+ omp.workshare.loop_wrapper {
+ omp.simd {
+ omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) {
+ omp.yield
+ }
+ } {omp.composite}
+ }
+ omp.terminator
+ }
+ return
+}
+
+// -----
+func.func @not_wrapper() {
+ omp.workshare {
+ // expected-error @below {{op nested in loop wrapper is not another loop wrapper or `omp.loop_nest`}}
+ omp.workshare.loop_wrapper {
+ %0 = arith.constant 0 : index
+ }
+ omp.terminator
+ }
+ return
+}
+// -----
+func.func @missing_workshare(%idx : index) {
+ // expected-error @below {{must be nested in an omp.workshare}}
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) {
+ omp.yield
+ }
+ }
return
}
diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir
index 69c53d1f77e841..c25a6ef4b4849b 100644
--- a/mlir/test/Dialect/OpenMP/ops.mlir
+++ b/mlir/test/Dialect/OpenMP/ops.mlir
@@ -2789,3 +2789,70 @@ func.func @omp_loop(%lb : index, %ub : index, %step : index) {
return
}
+
+// CHECK-LABEL: func @omp_workshare
+func.func @omp_workshare() {
+ // CHECK: omp.workshare {
+ omp.workshare {
+ "test.payload"() : () -> ()
+ // CHECK: omp.terminator
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: func @omp_workshare_nowait
+func.func @omp_workshare_nowait() {
+ // CHECK: omp.workshare nowait {
+ omp.workshare nowait {
+ "test.payload"() : () -> ()
+ // CHECK: omp.terminator
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: func @omp_workshare_multiple_blocks
+func.func @omp_workshare_multiple_blocks() {
+ // CHECK: omp.workshare {
+ omp.workshare {
+ cf.br ^bb2
+ ^bb2:
+ // CHECK: omp.terminator
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: func @omp_workshare_loop_wrapper
+func.func @omp_workshare_loop_wrapper(%idx : index) {
+ // CHECK-NEXT: omp.workshare {
+ omp.workshare {
+ // CHECK-NEXT: omp.workshare.loop_wrapper
+ omp.workshare.loop_wrapper {
+ // CHECK-NEXT: omp.loop_nest
+ omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) {
+ omp.yield
+ }
+ }
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: func @omp_workshare_loop_wrapper_attrs
+func.func @omp_workshare_loop_wrapper_attrs(%idx : index) {
+ // CHECK-NEXT: omp.workshare {
+ omp.workshare {
+ // CHECK-NEXT: omp.workshare.loop_wrapper {
+ omp.workshare.loop_wrapper {
+ // CHECK-NEXT: omp.loop_nest
+ omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) {
+ omp.yield
+ }
+ // CHECK: } {attr_in_dict}
+ } {attr_in_dict}
+ omp.terminator
+ }
+ return
+}
>From 02b8ee281947f6cb39c7eb3c4bbba59322e9015b Mon Sep 17 00:00:00 2001
From: serge-sans-paille <sguelton at mozilla.com>
Date: Tue, 19 Nov 2024 07:45:18 +0000
Subject: [PATCH 19/22] [llvm] Improve llvm.objectsize computation by computing
GEP, alloca and malloc parameter bounds (#115522)
Using a naive expression walker, it is possible to compute valuable
information for allocation functions, GEP and alloca, even in the
presence of some dynamic information.
We don't rely on computeConstantRange to avoid taking advantage of
undefined behavior, which would be counter-productive wrt. usual
llvm.objectsize usage.
llvm.objectsize plays an important role in _FORTIFY_SOURCE definitions,
so improving its diagnostics in turn improves the security of compiled
applications.
As a side note, as a result of recent optimization improvements, clang
no longer passes
https://github.com/serge-sans-paille/builtin_object_size-test-suite
This commit restores the situation and greatly improves the scope of
code handled by the static version of __builtin_object_size.
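To make the user-visible effect concrete, here is a hypothetical C++ example (not part of the patch) mirroring the select_malloc_size test added below; the printed bounds assume optimizations are enabled so the intrinsic can be folded:

  #include <cstddef>
  #include <cstdio>
  #include <cstdlib>
  int main(int argc, char **argv) {
    std::size_t size = argc > 1 ? 3 : 4;
    char *p = static_cast<char *>(std::malloc(size));
    // Type 0 requests an upper bound, type 2 a lower bound. Before this
    // patch both would typically fold to "unknown" ((size_t)-1 and 0).
    std::printf("max: %zu\n", __builtin_object_size(p, 0)); // expected: 4
    std::printf("min: %zu\n", __builtin_object_size(p, 2)); // expected: 3
    std::free(p);
    return 0;
  }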
---
llvm/include/llvm/IR/Value.h | 12 +-
llvm/lib/Analysis/MemoryBuiltins.cpp | 108 ++++++++++++++++-
.../builtin-object-size-range.ll | 109 ++++++++++++++++++
3 files changed, 221 insertions(+), 8 deletions(-)
create mode 100644 llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll
diff --git a/llvm/include/llvm/IR/Value.h b/llvm/include/llvm/IR/Value.h
index 945081b77e9536..d444a768a65436 100644
--- a/llvm/include/llvm/IR/Value.h
+++ b/llvm/include/llvm/IR/Value.h
@@ -723,12 +723,16 @@ class Value {
bool AllowInvariantGroup = false,
function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
nullptr) const;
- Value *stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
- bool AllowNonInbounds,
- bool AllowInvariantGroup = false) {
+
+ Value *stripAndAccumulateConstantOffsets(
+ const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
+ bool AllowInvariantGroup = false,
+ function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
+ nullptr) {
return const_cast<Value *>(
static_cast<const Value *>(this)->stripAndAccumulateConstantOffsets(
- DL, Offset, AllowNonInbounds, AllowInvariantGroup));
+ DL, Offset, AllowNonInbounds, AllowInvariantGroup,
+ ExternalAnalysis));
}
/// This is a wrapper around stripAndAccumulateConstantOffsets with the
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index cd8594d670502d..6c0940c4c81ebe 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -673,6 +673,69 @@ STATISTIC(ObjectVisitorArgument,
STATISTIC(ObjectVisitorLoad,
"Number of load instructions with unsolved size and offset");
+static std::optional<APInt>
+combinePossibleConstantValues(std::optional<APInt> LHS,
+ std::optional<APInt> RHS,
+ ObjectSizeOpts::Mode EvalMode) {
+ if (!LHS || !RHS)
+ return std::nullopt;
+ if (EvalMode == ObjectSizeOpts::Mode::Max)
+ return LHS->sge(*RHS) ? *LHS : *RHS;
+ else
+ return LHS->sle(*RHS) ? *LHS : *RHS;
+}
+
+static std::optional<APInt> aggregatePossibleConstantValuesImpl(
+ const Value *V, ObjectSizeOpts::Mode EvalMode, unsigned recursionDepth) {
+ constexpr unsigned maxRecursionDepth = 4;
+ if (recursionDepth == maxRecursionDepth)
+ return std::nullopt;
+
+ if (const auto *CI = dyn_cast<ConstantInt>(V)) {
+ return CI->getValue();
+ }
+
+ else if (const auto *SI = dyn_cast<SelectInst>(V)) {
+ return combinePossibleConstantValues(
+ aggregatePossibleConstantValuesImpl(SI->getTrueValue(), EvalMode,
+ recursionDepth + 1),
+ aggregatePossibleConstantValuesImpl(SI->getFalseValue(), EvalMode,
+ recursionDepth + 1),
+ EvalMode);
+ }
+
+ else if (const auto *PN = dyn_cast<PHINode>(V)) {
+ unsigned Count = PN->getNumIncomingValues();
+ if (Count == 0)
+ return std::nullopt;
+ auto Acc = aggregatePossibleConstantValuesImpl(
+ PN->getIncomingValue(0), EvalMode, recursionDepth + 1);
+ for (unsigned I = 1; Acc && I < Count; ++I) {
+ auto Tmp = aggregatePossibleConstantValuesImpl(
+ PN->getIncomingValue(I), EvalMode, recursionDepth + 1);
+ Acc = combinePossibleConstantValues(Acc, Tmp, EvalMode);
+ }
+ return Acc;
+ }
+
+ return std::nullopt;
+}
+
+static std::optional<APInt>
+aggregatePossibleConstantValues(const Value *V, ObjectSizeOpts::Mode EvalMode) {
+ if (auto *CI = dyn_cast<ConstantInt>(V))
+ return CI->getValue();
+
+ if (EvalMode != ObjectSizeOpts::Mode::Min &&
+ EvalMode != ObjectSizeOpts::Mode::Max)
+ return std::nullopt;
+
+ // Not using computeConstantRange here because we cannot guarantee it's not
+ // doing optimization based on UB which we want to avoid when expanding
+ // __builtin_object_size.
+ return aggregatePossibleConstantValuesImpl(V, EvalMode, 0u);
+}
+
/// Align \p Size according to \p Alignment. If \p Size is greater than
/// getSignedMaxValue(), set it as unknown as we can only represent signed value
/// in OffsetSpan.
@@ -720,11 +783,36 @@ OffsetSpan ObjectSizeOffsetVisitor::computeImpl(Value *V) {
V = V->stripAndAccumulateConstantOffsets(
DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true);
+ // Give it another try with an approximate analysis. We don't start with this
+ // one because stripAndAccumulateConstantOffsets behaves differently wrt.
+ // overflows if we provide an external analysis.
+ if ((Options.EvalMode == ObjectSizeOpts::Mode::Min ||
+ Options.EvalMode == ObjectSizeOpts::Mode::Max) &&
+ isa<GEPOperator>(V)) {
+ // External Analysis used to compute the Min/Max value of individual Offsets
+ // within a GEP.
+ ObjectSizeOpts::Mode EvalMode =
+ Options.EvalMode == ObjectSizeOpts::Mode::Min
+ ? ObjectSizeOpts::Mode::Max
+ : ObjectSizeOpts::Mode::Min;
+ auto OffsetRangeAnalysis = [EvalMode](Value &VOffset, APInt &Offset) {
+ if (auto PossibleOffset =
+ aggregatePossibleConstantValues(&VOffset, EvalMode)) {
+ Offset = *PossibleOffset;
+ return true;
+ }
+ return false;
+ };
+
+ V = V->stripAndAccumulateConstantOffsets(
+ DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true,
+ /*ExternalAnalysis=*/OffsetRangeAnalysis);
+ }
+
// Later we use the index type size and zero but it will match the type of the
// value that is passed to computeImpl.
IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
Zero = APInt::getZero(IntTyBits);
-
OffsetSpan ORT = computeValue(V);
bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
@@ -812,8 +900,9 @@ OffsetSpan ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
return OffsetSpan(Zero, align(Size, I.getAlign()));
Value *ArraySize = I.getArraySize();
- if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
- APInt NumElems = C->getValue();
+ if (auto PossibleSize =
+ aggregatePossibleConstantValues(ArraySize, Options.EvalMode)) {
+ APInt NumElems = *PossibleSize;
if (!CheckedZextOrTrunc(NumElems))
return ObjectSizeOffsetVisitor::unknown();
@@ -839,7 +928,18 @@ OffsetSpan ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
}
OffsetSpan ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
- if (std::optional<APInt> Size = getAllocSize(&CB, TLI)) {
+ auto Mapper = [this](const Value *V) -> const Value * {
+ if (!V->getType()->isIntegerTy())
+ return V;
+
+ if (auto PossibleBound =
+ aggregatePossibleConstantValues(V, Options.EvalMode))
+ return ConstantInt::get(V->getType(), *PossibleBound);
+
+ return V;
+ };
+
+ if (std::optional<APInt> Size = getAllocSize(&CB, TLI, Mapper)) {
// Very large unsigned value cannot be represented as OffsetSpan.
if (Size->isNegative())
return ObjectSizeOffsetVisitor::unknown();
diff --git a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll
new file mode 100644
index 00000000000000..f84ebee1442893
--- /dev/null
+++ b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll
@@ -0,0 +1,109 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=lower-constant-intrinsics -S < %s | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare i64 @llvm.objectsize.i64.p0(ptr, i1 immarg, i1 immarg, i1 immarg)
+declare noalias ptr @malloc(i64 noundef) #0
+
+define i64 @select_alloc_size(i1 %cond) {
+; CHECK-LABEL: @select_alloc_size(
+; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 [[SIZE]], align 1
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 4, i64 3
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %size = select i1 %cond, i64 3, i64 4
+ %ptr = alloca i8, i64 %size
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 true, i1 true, i1 false)
+ %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+define i64 @select_malloc_size(i1 %cond) {
+; CHECK-LABEL: @select_malloc_size(
+; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
+; CHECK-NEXT: [[PTR:%.*]] = call noalias ptr @malloc(i64 noundef [[SIZE]])
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 4, i64 3
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %size = select i1 %cond, i64 3, i64 4
+ %ptr = call noalias ptr @malloc(i64 noundef %size)
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 true, i1 true, i1 false)
+ %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+define i64 @select_gep_offset(i1 %cond) {
+; CHECK-LABEL: @select_gep_offset(
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 10, align 1
+; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
+; CHECK-NEXT: [[PTR_SLIDE:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[OFFSET]]
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 7, i64 6
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %ptr = alloca i8, i64 10
+ %offset = select i1 %cond, i64 3, i64 4
+ %ptr.slide = getelementptr inbounds i8, ptr %ptr, i64 %offset
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 true, i1 true, i1 false)
+ %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+define i64 @select_gep_neg_offset(i1 %c0, i1 %c1) {
+; CHECK-LABEL: @select_gep_neg_offset(
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 10, align 1
+; CHECK-NEXT: [[PTR_SLIDE_1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 5
+; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i64 -3, i64 -4
+; CHECK-NEXT: [[PTR_SLIDE_2:%.*]] = getelementptr inbounds i8, ptr [[PTR_SLIDE_1]], i64 [[OFFSET]]
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[C1:%.*]], i64 9, i64 8
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %ptr = alloca i8, i64 10
+ %ptr.slide.1 = getelementptr inbounds i8, ptr %ptr, i64 5
+ %offset = select i1 %c0, i64 -3, i64 -4
+ %ptr.slide.2 = getelementptr inbounds i8, ptr %ptr.slide.1, i64 %offset
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide.2, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide.2, i1 true, i1 true, i1 false)
+ %res = select i1 %c1, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+define i64 @select_neg_oob_offset(i1 %c0, i1 %c1) {
+; CHECK-LABEL: @select_neg_oob_offset(
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 10, align 1
+; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[C0:%.*]], i64 -3, i64 -4
+; CHECK-NEXT: [[PTR_SLIDE:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[OFFSET]]
+; CHECK-NEXT: ret i64 0
+;
+ %ptr = alloca i8, i64 10
+ %offset = select i1 %c0, i64 -3, i64 -4
+ %ptr.slide = getelementptr inbounds i8, ptr %ptr, i64 %offset
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 true, i1 true, i1 false)
+ %res = select i1 %c1, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+define i64 @select_gep_offsets(i1 %cond) {
+; CHECK-LABEL: @select_gep_offsets(
+; CHECK-NEXT: [[PTR:%.*]] = alloca [10 x i8], i64 2, align 1
+; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i32 0, i32 1
+; CHECK-NEXT: [[PTR_SLIDE:%.*]] = getelementptr inbounds [10 x i8], ptr [[PTR]], i32 [[OFFSET]], i32 5
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 15, i64 5
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %ptr = alloca [10 x i8], i64 2
+ %offset = select i1 %cond, i32 0, i32 1
+ %ptr.slide = getelementptr inbounds [10 x i8], ptr %ptr, i32 %offset, i32 5
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 true, i1 true, i1 false)
+ %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+attributes #0 = { nounwind allocsize(0) }
>From 7d6713db600af1b4381149a0e794cbce99ca6cb2 Mon Sep 17 00:00:00 2001
From: "Ivan R. Ivanov" <ivanov.i.aa at m.titech.ac.jp>
Date: Tue, 19 Nov 2024 16:58:30 +0900
Subject: [PATCH 20/22] [flang][omp] Emit omp.workshare in frontend (#101444)
Emit the contents of OpenMP workshare constructs in `omp.workshare`.
---
flang/lib/Lower/OpenMP/OpenMP.cpp | 32 +++++++++++++++++++++++----
flang/test/Lower/OpenMP/workshare.f90 | 6 ++---
2 files changed, 31 insertions(+), 7 deletions(-)
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index 879a3e0ad70780..a2779213a1a15a 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -1372,6 +1372,15 @@ static void genTaskwaitClauses(lower::AbstractConverter &converter,
loc, llvm::omp::Directive::OMPD_taskwait);
}
+static void genWorkshareClauses(lower::AbstractConverter &converter,
+ semantics::SemanticsContext &semaCtx,
+ lower::StatementContext &stmtCtx,
+ const List<Clause> &clauses, mlir::Location loc,
+ mlir::omp::WorkshareOperands &clauseOps) {
+ ClauseProcessor cp(converter, semaCtx, clauses);
+ cp.processNowait(clauseOps);
+}
+
static void genTeamsClauses(lower::AbstractConverter &converter,
semantics::SemanticsContext &semaCtx,
lower::StatementContext &stmtCtx,
@@ -2033,6 +2042,24 @@ genTaskyieldOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
return converter.getFirOpBuilder().create<mlir::omp::TaskyieldOp>(loc);
}
+static mlir::omp::WorkshareOp
+genWorkshareOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
+ semantics::SemanticsContext &semaCtx,
+ lower::pft::Evaluation &eval, mlir::Location loc,
+ const ConstructQueue &queue,
+ ConstructQueue::const_iterator item) {
+ lower::StatementContext stmtCtx;
+ mlir::omp::WorkshareOperands clauseOps;
+ genWorkshareClauses(converter, semaCtx, stmtCtx, item->clauses, loc,
+ clauseOps);
+
+ return genOpWithBody<mlir::omp::WorkshareOp>(
+ OpWithBodyGenInfo(converter, symTable, semaCtx, loc, eval,
+ llvm::omp::Directive::OMPD_workshare)
+ .setClauses(&item->clauses),
+ queue, item, clauseOps);
+}
+
static mlir::omp::TeamsOp
genTeamsOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
semantics::SemanticsContext &semaCtx, lower::pft::Evaluation &eval,
@@ -2631,10 +2658,7 @@ static void genOMPDispatch(lower::AbstractConverter &converter,
llvm::omp::getOpenMPDirectiveName(dir) + ")");
// case llvm::omp::Directive::OMPD_workdistribute:
case llvm::omp::Directive::OMPD_workshare:
- // FIXME: Workshare is not a commonly used OpenMP construct, an
- // implementation for this feature will come later. For the codes
- // that use this construct, add a single construct for now.
- genSingleOp(converter, symTable, semaCtx, eval, loc, queue, item);
+ genWorkshareOp(converter, symTable, semaCtx, eval, loc, queue, item);
break;
default:
// Combined and composite constructs should have been split into a sequence
diff --git a/flang/test/Lower/OpenMP/workshare.f90 b/flang/test/Lower/OpenMP/workshare.f90
index 1e11677a15e1f0..8e771952f5b6da 100644
--- a/flang/test/Lower/OpenMP/workshare.f90
+++ b/flang/test/Lower/OpenMP/workshare.f90
@@ -6,7 +6,7 @@ subroutine sb1(arr)
integer :: arr(:)
!CHECK: omp.parallel {
!$omp parallel
-!CHECK: omp.single {
+!CHECK: omp.workshare {
!$omp workshare
arr = 0
!$omp end workshare
@@ -20,7 +20,7 @@ subroutine sb2(arr)
integer :: arr(:)
!CHECK: omp.parallel {
!$omp parallel
-!CHECK: omp.single nowait {
+!CHECK: omp.workshare nowait {
!$omp workshare
arr = 0
!$omp end workshare nowait
@@ -33,7 +33,7 @@ subroutine sb2(arr)
subroutine sb3(arr)
integer :: arr(:)
!CHECK: omp.parallel {
-!CHECK: omp.single {
+!CHECK: omp.workshare {
!$omp parallel workshare
arr = 0
!$omp end parallel workshare
>From 8bb21ae6c92c03b2487ee9b0df584c7a17446863 Mon Sep 17 00:00:00 2001
From: "Ivan R. Ivanov" <ivanov.i.aa at m.titech.ac.jp>
Date: Tue, 19 Nov 2024 17:00:04 +0900
Subject: [PATCH 21/22] [flang] Introduce custom loop nest generation for loops
in workshare construct (#101445)
This alternative loop nest generation is used to generate an OpenMP loop nest instead of fir loops to facilitate parallelizing statements in an OpenMP `workshare` construct.
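For context, a short sketch of the call pattern this enables, assuming `builder`, `loc`, and `extents` from the surrounding lowering code (an illustration, not code from the patch):

  hlfir::LoopNest nest = hlfir::genLoopNest(loc, builder, extents,
                                            /*isUnordered=*/true,
                                            /*emitWorkshareLoop=*/true);
  builder.setInsertionPointToStart(nest.body);
  // ... emit the element-wise work here, using nest.oneBasedIndices ...
  builder.setInsertionPointAfter(nest.outerOp);

Callers now use the op-agnostic `body` and `outerOp` fields instead of fir::DoLoopOp directly, which is what lets the same code paths emit either fir loops or an omp.workshare.loop_wrapper/omp.loop_nest pair.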
---
.../flang/Optimizer/Builder/HLFIRTools.h | 12 +++--
flang/lib/Lower/ConvertCall.cpp | 2 +-
flang/lib/Lower/OpenMP/ReductionProcessor.cpp | 4 +-
flang/lib/Optimizer/Builder/HLFIRTools.cpp | 51 ++++++++++++++-----
.../HLFIR/Transforms/BufferizeHLFIR.cpp | 3 +-
.../LowerHLFIROrderedAssignments.cpp | 33 ++++++------
.../Transforms/OptimizedBufferization.cpp | 6 +--
7 files changed, 68 insertions(+), 43 deletions(-)
diff --git a/flang/include/flang/Optimizer/Builder/HLFIRTools.h b/flang/include/flang/Optimizer/Builder/HLFIRTools.h
index 6b41025eea0780..f073f494b3fb21 100644
--- a/flang/include/flang/Optimizer/Builder/HLFIRTools.h
+++ b/flang/include/flang/Optimizer/Builder/HLFIRTools.h
@@ -357,8 +357,8 @@ hlfir::ElementalOp genElementalOp(
/// Structure to describe a loop nest.
struct LoopNest {
- fir::DoLoopOp outerLoop;
- fir::DoLoopOp innerLoop;
+ mlir::Operation *outerOp = nullptr;
+ mlir::Block *body = nullptr;
llvm::SmallVector<mlir::Value> oneBasedIndices;
};
@@ -366,11 +366,13 @@ struct LoopNest {
/// \p isUnordered specifies whether the loops in the loop nest
/// are unordered.
LoopNest genLoopNest(mlir::Location loc, fir::FirOpBuilder &builder,
- mlir::ValueRange extents, bool isUnordered = false);
+ mlir::ValueRange extents, bool isUnordered = false,
+ bool emitWorkshareLoop = false);
inline LoopNest genLoopNest(mlir::Location loc, fir::FirOpBuilder &builder,
- mlir::Value shape, bool isUnordered = false) {
+ mlir::Value shape, bool isUnordered = false,
+ bool emitWorkshareLoop = false) {
return genLoopNest(loc, builder, getIndexExtents(loc, builder, shape),
- isUnordered);
+ isUnordered, emitWorkshareLoop);
}
/// Inline the body of an hlfir.elemental at the current insertion point
diff --git a/flang/lib/Lower/ConvertCall.cpp b/flang/lib/Lower/ConvertCall.cpp
index 9f5b58590fb79e..e84e7afbe82e09 100644
--- a/flang/lib/Lower/ConvertCall.cpp
+++ b/flang/lib/Lower/ConvertCall.cpp
@@ -2135,7 +2135,7 @@ class ElementalCallBuilder {
hlfir::genLoopNest(loc, builder, shape, !mustBeOrdered);
mlir::ValueRange oneBasedIndices = loopNest.oneBasedIndices;
auto insPt = builder.saveInsertionPoint();
- builder.setInsertionPointToStart(loopNest.innerLoop.getBody());
+ builder.setInsertionPointToStart(loopNest.body);
callContext.stmtCtx.pushScope();
for (auto &preparedActual : loweredActuals)
if (preparedActual)
diff --git a/flang/lib/Lower/OpenMP/ReductionProcessor.cpp b/flang/lib/Lower/OpenMP/ReductionProcessor.cpp
index 6b98ea3d0615b6..736de2ee511bef 100644
--- a/flang/lib/Lower/OpenMP/ReductionProcessor.cpp
+++ b/flang/lib/Lower/OpenMP/ReductionProcessor.cpp
@@ -374,7 +374,7 @@ static void genBoxCombiner(fir::FirOpBuilder &builder, mlir::Location loc,
// know this won't miss any opportunities for clever elemental inlining
hlfir::LoopNest nest = hlfir::genLoopNest(
loc, builder, shapeShift.getExtents(), /*isUnordered=*/true);
- builder.setInsertionPointToStart(nest.innerLoop.getBody());
+ builder.setInsertionPointToStart(nest.body);
mlir::Type refTy = fir::ReferenceType::get(seqTy.getEleTy());
auto lhsEleAddr = builder.create<fir::ArrayCoorOp>(
loc, refTy, lhs, shapeShift, /*slice=*/mlir::Value{},
@@ -388,7 +388,7 @@ static void genBoxCombiner(fir::FirOpBuilder &builder, mlir::Location loc,
builder, loc, redId, refTy, lhsEle, rhsEle);
builder.create<fir::StoreOp>(loc, scalarReduction, lhsEleAddr);
- builder.setInsertionPointAfter(nest.outerLoop);
+ builder.setInsertionPointAfter(nest.outerOp);
builder.create<mlir::omp::YieldOp>(loc, lhsAddr);
}
diff --git a/flang/lib/Optimizer/Builder/HLFIRTools.cpp b/flang/lib/Optimizer/Builder/HLFIRTools.cpp
index 8d0ae2f195178c..7425ccf7fc0e30 100644
--- a/flang/lib/Optimizer/Builder/HLFIRTools.cpp
+++ b/flang/lib/Optimizer/Builder/HLFIRTools.cpp
@@ -20,6 +20,7 @@
#include "mlir/IR/IRMapping.h"
#include "mlir/Support/LLVM.h"
#include "llvm/ADT/TypeSwitch.h"
+#include <mlir/Dialect/OpenMP/OpenMPDialect.h>
#include <optional>
// Return explicit extents. If the base is a fir.box, this won't read it to
@@ -855,26 +856,50 @@ mlir::Value hlfir::inlineElementalOp(
hlfir::LoopNest hlfir::genLoopNest(mlir::Location loc,
fir::FirOpBuilder &builder,
- mlir::ValueRange extents, bool isUnordered) {
+ mlir::ValueRange extents, bool isUnordered,
+ bool emitWorkshareLoop) {
+ emitWorkshareLoop = emitWorkshareLoop && isUnordered;
hlfir::LoopNest loopNest;
assert(!extents.empty() && "must have at least one extent");
- auto insPt = builder.saveInsertionPoint();
+ mlir::OpBuilder::InsertionGuard guard(builder);
loopNest.oneBasedIndices.assign(extents.size(), mlir::Value{});
// Build loop nest from column to row.
auto one = builder.create<mlir::arith::ConstantIndexOp>(loc, 1);
mlir::Type indexType = builder.getIndexType();
- unsigned dim = extents.size() - 1;
- for (auto extent : llvm::reverse(extents)) {
- auto ub = builder.createConvert(loc, indexType, extent);
- loopNest.innerLoop =
- builder.create<fir::DoLoopOp>(loc, one, ub, one, isUnordered);
- builder.setInsertionPointToStart(loopNest.innerLoop.getBody());
- // Reverse the indices so they are in column-major order.
- loopNest.oneBasedIndices[dim--] = loopNest.innerLoop.getInductionVar();
- if (!loopNest.outerLoop)
- loopNest.outerLoop = loopNest.innerLoop;
+ if (emitWorkshareLoop) {
+ auto wslw = builder.create<mlir::omp::WorkshareLoopWrapperOp>(loc);
+ loopNest.outerOp = wslw;
+ builder.createBlock(&wslw.getRegion());
+ mlir::omp::LoopNestOperands lnops;
+ lnops.loopInclusive = builder.getUnitAttr();
+ for (auto extent : llvm::reverse(extents)) {
+ lnops.loopLowerBounds.push_back(one);
+ lnops.loopUpperBounds.push_back(extent);
+ lnops.loopSteps.push_back(one);
+ }
+ auto lnOp = builder.create<mlir::omp::LoopNestOp>(loc, lnops);
+ mlir::Block *block = builder.createBlock(&lnOp.getRegion());
+ for (auto extent : llvm::reverse(extents))
+ block->addArgument(extent.getType(), extent.getLoc());
+ loopNest.body = block;
+ builder.create<mlir::omp::YieldOp>(loc);
+ for (unsigned dim = 0; dim < extents.size(); dim++)
+ loopNest.oneBasedIndices[extents.size() - dim - 1] =
+ lnOp.getRegion().front().getArgument(dim);
+ } else {
+ unsigned dim = extents.size() - 1;
+ for (auto extent : llvm::reverse(extents)) {
+ auto ub = builder.createConvert(loc, indexType, extent);
+ auto doLoop =
+ builder.create<fir::DoLoopOp>(loc, one, ub, one, isUnordered);
+ loopNest.body = doLoop.getBody();
+ builder.setInsertionPointToStart(loopNest.body);
+ // Reverse the indices so they are in column-major order.
+ loopNest.oneBasedIndices[dim--] = doLoop.getInductionVar();
+ if (!loopNest.outerOp)
+ loopNest.outerOp = doLoop;
+ }
}
- builder.restoreInsertionPoint(insPt);
return loopNest;
}
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp b/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp
index a70a6b388c4b1a..07794828fce267 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp
@@ -26,6 +26,7 @@
#include "flang/Optimizer/HLFIR/HLFIRDialect.h"
#include "flang/Optimizer/HLFIR/HLFIROps.h"
#include "flang/Optimizer/HLFIR/Passes.h"
+#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
#include "mlir/IR/Dominance.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
@@ -793,7 +794,7 @@ struct ElementalOpConversion
hlfir::LoopNest loopNest =
hlfir::genLoopNest(loc, builder, extents, !elemental.isOrdered());
auto insPt = builder.saveInsertionPoint();
- builder.setInsertionPointToStart(loopNest.innerLoop.getBody());
+ builder.setInsertionPointToStart(loopNest.body);
auto yield = hlfir::inlineElementalOp(loc, builder, elemental,
loopNest.oneBasedIndices);
hlfir::Entity elementValue(yield.getElementValue());
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp b/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp
index 85dd517cb57914..424566462e8fe0 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp
@@ -464,7 +464,7 @@ void OrderedAssignmentRewriter::pre(hlfir::RegionAssignOp regionAssignOp) {
// if the LHS is not).
mlir::Value shape = hlfir::genShape(loc, builder, lhsEntity);
elementalLoopNest = hlfir::genLoopNest(loc, builder, shape);
- builder.setInsertionPointToStart(elementalLoopNest->innerLoop.getBody());
+ builder.setInsertionPointToStart(elementalLoopNest->body);
lhsEntity = hlfir::getElementAt(loc, builder, lhsEntity,
elementalLoopNest->oneBasedIndices);
rhsEntity = hlfir::getElementAt(loc, builder, rhsEntity,
@@ -484,7 +484,7 @@ void OrderedAssignmentRewriter::pre(hlfir::RegionAssignOp regionAssignOp) {
for (auto &cleanupConversion : argConversionCleanups)
cleanupConversion();
if (elementalLoopNest)
- builder.setInsertionPointAfter(elementalLoopNest->outerLoop);
+ builder.setInsertionPointAfter(elementalLoopNest->outerOp);
} else {
// TODO: preserve allocatable assignment aspects for forall once
// they are conveyed in hlfir.region_assign.
@@ -492,8 +492,7 @@ void OrderedAssignmentRewriter::pre(hlfir::RegionAssignOp regionAssignOp) {
}
generateCleanupIfAny(loweredLhs.elementalCleanup);
if (loweredLhs.vectorSubscriptLoopNest)
- builder.setInsertionPointAfter(
- loweredLhs.vectorSubscriptLoopNest->outerLoop);
+ builder.setInsertionPointAfter(loweredLhs.vectorSubscriptLoopNest->outerOp);
generateCleanupIfAny(oldRhsYield);
generateCleanupIfAny(loweredLhs.nonElementalCleanup);
}
@@ -518,8 +517,8 @@ void OrderedAssignmentRewriter::pre(hlfir::WhereOp whereOp) {
hlfir::Entity savedMask{maybeSaved->first};
mlir::Value shape = hlfir::genShape(loc, builder, savedMask);
whereLoopNest = hlfir::genLoopNest(loc, builder, shape);
- constructStack.push_back(whereLoopNest->outerLoop.getOperation());
- builder.setInsertionPointToStart(whereLoopNest->innerLoop.getBody());
+ constructStack.push_back(whereLoopNest->outerOp);
+ builder.setInsertionPointToStart(whereLoopNest->body);
mlir::Value cdt = hlfir::getElementAt(loc, builder, savedMask,
whereLoopNest->oneBasedIndices);
generateMaskIfOp(cdt);
@@ -527,7 +526,7 @@ void OrderedAssignmentRewriter::pre(hlfir::WhereOp whereOp) {
// If this is the same run as the one that saved the value, the clean-up
// was left-over to be done now.
auto insertionPoint = builder.saveInsertionPoint();
- builder.setInsertionPointAfter(whereLoopNest->outerLoop);
+ builder.setInsertionPointAfter(whereLoopNest->outerOp);
generateCleanupIfAny(maybeSaved->second);
builder.restoreInsertionPoint(insertionPoint);
}
@@ -539,8 +538,8 @@ void OrderedAssignmentRewriter::pre(hlfir::WhereOp whereOp) {
mask.generateNoneElementalPart(builder, mapper);
mlir::Value shape = mask.generateShape(builder, mapper);
whereLoopNest = hlfir::genLoopNest(loc, builder, shape);
- constructStack.push_back(whereLoopNest->outerLoop.getOperation());
- builder.setInsertionPointToStart(whereLoopNest->innerLoop.getBody());
+ constructStack.push_back(whereLoopNest->outerOp);
+ builder.setInsertionPointToStart(whereLoopNest->body);
mlir::Value cdt = generateMaskedEntity(mask);
generateMaskIfOp(cdt);
return;
@@ -754,7 +753,7 @@ OrderedAssignmentRewriter::generateYieldedLHS(
loweredLhs.vectorSubscriptLoopNest = hlfir::genLoopNest(
loc, builder, loweredLhs.vectorSubscriptShape.value());
builder.setInsertionPointToStart(
- loweredLhs.vectorSubscriptLoopNest->innerLoop.getBody());
+ loweredLhs.vectorSubscriptLoopNest->body);
}
loweredLhs.lhs = temp->second.fetch(loc, builder);
return loweredLhs;
@@ -771,8 +770,7 @@ OrderedAssignmentRewriter::generateYieldedLHS(
loweredLhs.vectorSubscriptLoopNest =
hlfir::genLoopNest(loc, builder, *loweredLhs.vectorSubscriptShape,
!elementalAddrLhs.isOrdered());
- builder.setInsertionPointToStart(
- loweredLhs.vectorSubscriptLoopNest->innerLoop.getBody());
+ builder.setInsertionPointToStart(loweredLhs.vectorSubscriptLoopNest->body);
mapper.map(elementalAddrLhs.getIndices(),
loweredLhs.vectorSubscriptLoopNest->oneBasedIndices);
for (auto &op : elementalAddrLhs.getBody().front().without_terminator())
@@ -798,11 +796,11 @@ OrderedAssignmentRewriter::generateMaskedEntity(MaskedArrayExpr &maskedExpr) {
if (!maskedExpr.noneElementalPartWasGenerated) {
// Generate none elemental part before the where loops (but inside the
// current forall loops if any).
- builder.setInsertionPoint(whereLoopNest->outerLoop);
+ builder.setInsertionPoint(whereLoopNest->outerOp);
maskedExpr.generateNoneElementalPart(builder, mapper);
}
// Generate the none elemental part cleanup after the where loops.
- builder.setInsertionPointAfter(whereLoopNest->outerLoop);
+ builder.setInsertionPointAfter(whereLoopNest->outerOp);
maskedExpr.generateNoneElementalCleanupIfAny(builder, mapper);
// Generate the value of the current element for the masked expression
// at the current insertion point (inside the where loops, and any fir.if
@@ -1242,7 +1240,7 @@ void OrderedAssignmentRewriter::saveLeftHandSide(
LhsValueAndCleanUp loweredLhs = generateYieldedLHS(loc, region);
fir::factory::TemporaryStorage *temp = nullptr;
if (loweredLhs.vectorSubscriptLoopNest)
- constructStack.push_back(loweredLhs.vectorSubscriptLoopNest->outerLoop);
+ constructStack.push_back(loweredLhs.vectorSubscriptLoopNest->outerOp);
if (loweredLhs.vectorSubscriptLoopNest && !rhsIsArray(regionAssignOp)) {
// Vector subscripted entity for which the shape must also be saved on top
// of the element addresses (e.g. the shape may change in each forall
@@ -1265,7 +1263,7 @@ void OrderedAssignmentRewriter::saveLeftHandSide(
// subscripted LHS.
auto &vectorTmp = temp->cast<fir::factory::AnyVectorSubscriptStack>();
auto insertionPoint = builder.saveInsertionPoint();
- builder.setInsertionPoint(loweredLhs.vectorSubscriptLoopNest->outerLoop);
+ builder.setInsertionPoint(loweredLhs.vectorSubscriptLoopNest->outerOp);
vectorTmp.pushShape(loc, builder, shape);
builder.restoreInsertionPoint(insertionPoint);
} else {
@@ -1290,8 +1288,7 @@ void OrderedAssignmentRewriter::saveLeftHandSide(
generateCleanupIfAny(loweredLhs.elementalCleanup);
if (loweredLhs.vectorSubscriptLoopNest) {
constructStack.pop_back();
- builder.setInsertionPointAfter(
- loweredLhs.vectorSubscriptLoopNest->outerLoop);
+ builder.setInsertionPointAfter(loweredLhs.vectorSubscriptLoopNest->outerOp);
}
}
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp b/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
index d05a3258cf293c..166649d955dabd 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
@@ -483,7 +483,7 @@ llvm::LogicalResult ElementalAssignBufferization::matchAndRewrite(
// hlfir.elemental region inside the inner loop
hlfir::LoopNest loopNest =
hlfir::genLoopNest(loc, builder, extents, !elemental.isOrdered());
- builder.setInsertionPointToStart(loopNest.innerLoop.getBody());
+ builder.setInsertionPointToStart(loopNest.body);
auto yield = hlfir::inlineElementalOp(loc, builder, elemental,
loopNest.oneBasedIndices);
hlfir::Entity elementValue{yield.getElementValue()};
@@ -554,7 +554,7 @@ llvm::LogicalResult BroadcastAssignBufferization::matchAndRewrite(
hlfir::getIndexExtents(loc, builder, shape);
hlfir::LoopNest loopNest =
hlfir::genLoopNest(loc, builder, extents, /*isUnordered=*/true);
- builder.setInsertionPointToStart(loopNest.innerLoop.getBody());
+ builder.setInsertionPointToStart(loopNest.body);
auto arrayElement =
hlfir::getElementAt(loc, builder, lhs, loopNest.oneBasedIndices);
builder.create<hlfir::AssignOp>(loc, rhs, arrayElement);
@@ -652,7 +652,7 @@ llvm::LogicalResult VariableAssignBufferization::matchAndRewrite(
hlfir::getIndexExtents(loc, builder, shape);
hlfir::LoopNest loopNest =
hlfir::genLoopNest(loc, builder, extents, /*isUnordered=*/true);
- builder.setInsertionPointToStart(loopNest.innerLoop.getBody());
+ builder.setInsertionPointToStart(loopNest.body);
auto rhsArrayElement =
hlfir::getElementAt(loc, builder, rhs, loopNest.oneBasedIndices);
rhsArrayElement = hlfir::loadTrivialScalar(loc, builder, rhsArrayElement);
>From e2ac3619c68851a67668ade6769451cff90df69b Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Sun, 4 Aug 2024 22:06:55 +0900
Subject: [PATCH 22/22] [flang] Lower omp.workshare to other omp constructs
Change to workshare loop wrapper op
Move single op declaration
Schedule pass properly
Correctly handle nested loop nests to be parallelized by workshare
Leave comments for shouldUseWorkshareLowering
Use copyprivate to scatter val from omp.single
TODO still need to implement copy function
TODO transitive check for usage outside of omp.single not implemented yet
Transitively check for users outside of single op
TODO need to implement copy func
TODO need to hoist allocas outside of single regions
Add tests
Hoist allocas
More tests
Emit body for copy func
Test the tmp storing logic
Clean up trivially dead ops
Only handle single-block regions for now
Fix tests for custom assembly for loop wrapper
Only run the lower workshare pass if openmp is enabled
Implement some missing functionality
Fix tests
Fix test
Iterate backwards to find all trivially dead ops
Add explanation comment for createCopyFunc
Update test
Emit a proper error message for CFG in workshare
Cleanup tests
Fix todo tests
Fix dst src in copy function
Use omp.single to handle CFG cases
Fix lower workshare tests
Different warning
Fix bug and add better clarification comments
Fix message
Fix tests
Do not emit empty omp.single's
LowerWorkshare tests
pipelines fix
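A note on the copyprivate mechanics mentioned above: for each value hoisted out of an omp.single, the pass emits a small _workshare_copy_<type> function so the single construct can broadcast the result computed by the executing thread to the other threads' private copies. Conceptually (hypothetical C++ illustration; the real code emits FIR, see createCopyFunc below) each such function reduces to:

  // dst: a receiving thread's private storage; src: the executing thread's copy.
  static void workshareCopy(int *dst, const int *src) {
    *dst = *src; // shallow load/store, matching the fir.load + fir.store pair
  }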
---
flang/include/flang/Optimizer/OpenMP/Passes.h | 5 +
.../include/flang/Optimizer/OpenMP/Passes.td | 5 +
.../flang/Optimizer/Passes/Pipelines.h | 3 +-
flang/include/flang/Tools/CrossToolHelpers.h | 1 +
flang/lib/Frontend/FrontendActions.cpp | 10 +-
flang/lib/Optimizer/OpenMP/CMakeLists.txt | 1 +
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 527 ++++++++++++++++++
flang/lib/Optimizer/Passes/Pipelines.cpp | 6 +-
flang/test/Fir/basic-program.fir | 1 +
.../OpenMP/lower-workshare-alloca.mlir | 53 ++
.../OpenMP/lower-workshare-binding.mlir | 49 ++
.../OpenMP/lower-workshare-cleanup.mlir | 57 ++
.../OpenMP/lower-workshare-copyprivate.mlir | 73 +++
.../lower-workshare-correct-parallelize.mlir | 25 +
.../OpenMP/lower-workshare-no-single.mlir | 19 +
.../OpenMP/lower-workshare-nowait.mlir | 23 +
.../OpenMP/lower-workshare-todo-cfg-dom.mlir | 26 +
.../OpenMP/lower-workshare-todo-cfg.mlir | 23 +
flang/tools/bbc/bbc.cpp | 5 +-
flang/tools/tco/tco.cpp | 1 +
20 files changed, 908 insertions(+), 5 deletions(-)
create mode 100644 flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-binding.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-nowait.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
diff --git a/flang/include/flang/Optimizer/OpenMP/Passes.h b/flang/include/flang/Optimizer/OpenMP/Passes.h
index 403d79667bf448..feb395f1a12dbd 100644
--- a/flang/include/flang/Optimizer/OpenMP/Passes.h
+++ b/flang/include/flang/Optimizer/OpenMP/Passes.h
@@ -25,6 +25,11 @@ namespace flangomp {
#define GEN_PASS_REGISTRATION
#include "flang/Optimizer/OpenMP/Passes.h.inc"
+/// Implements the logic specified in the 2.8.3 workshare Construct section of
+/// the OpenMP standard which specifies what statements or constructs shall be
+/// divided into units of work.
+bool shouldUseWorkshareLowering(mlir::Operation *op);
+
} // namespace flangomp
#endif // FORTRAN_OPTIMIZER_OPENMP_PASSES_H
diff --git a/flang/include/flang/Optimizer/OpenMP/Passes.td b/flang/include/flang/Optimizer/OpenMP/Passes.td
index c070bc22ff20cc..37977334c1e9ed 100644
--- a/flang/include/flang/Optimizer/OpenMP/Passes.td
+++ b/flang/include/flang/Optimizer/OpenMP/Passes.td
@@ -50,4 +50,9 @@ def FunctionFilteringPass : Pass<"omp-function-filtering"> {
];
}
+// Needs to be scheduled on Module as we create functions in it
+def LowerWorkshare : Pass<"lower-workshare", "::mlir::ModuleOp"> {
+ let summary = "Lower workshare construct";
+}
+
#endif //FORTRAN_OPTIMIZER_OPENMP_PASSES
diff --git a/flang/include/flang/Optimizer/Passes/Pipelines.h b/flang/include/flang/Optimizer/Passes/Pipelines.h
index 3b54ac38838587..55fafc2e6b36fe 100644
--- a/flang/include/flang/Optimizer/Passes/Pipelines.h
+++ b/flang/include/flang/Optimizer/Passes/Pipelines.h
@@ -123,7 +123,8 @@ void createDefaultFIROptimizerPassPipeline(mlir::PassManager &pm,
/// \param optLevel - optimization level used for creating FIR optimization
/// passes pipeline
void createHLFIRToFIRPassPipeline(
- mlir::PassManager &pm, llvm::OptimizationLevel optLevel = defaultOptLevel);
+ mlir::PassManager &pm, bool enableOpenMP,
+ llvm::OptimizationLevel optLevel = defaultOptLevel);
/// Create a pass pipeline for handling certain OpenMP transformations needed
/// prior to FIR lowering.
diff --git a/flang/include/flang/Tools/CrossToolHelpers.h b/flang/include/flang/Tools/CrossToolHelpers.h
index df4b21ada058fe..d936b739e58157 100644
--- a/flang/include/flang/Tools/CrossToolHelpers.h
+++ b/flang/include/flang/Tools/CrossToolHelpers.h
@@ -123,6 +123,7 @@ struct MLIRToLLVMPassPipelineConfig : public FlangEPCallBacks {
false; ///< Set no-signed-zeros-fp-math attribute for functions.
bool UnsafeFPMath = false; ///< Set unsafe-fp-math attribute for functions.
bool NSWOnLoopVarInc = false; ///< Add nsw flag to loop variable increments.
+ bool EnableOpenMP = false; ///< Enable OpenMP lowering.
};
struct OffloadModuleOpts {
diff --git a/flang/lib/Frontend/FrontendActions.cpp b/flang/lib/Frontend/FrontendActions.cpp
index f2e460fc53a67f..8c21fe18e67b4d 100644
--- a/flang/lib/Frontend/FrontendActions.cpp
+++ b/flang/lib/Frontend/FrontendActions.cpp
@@ -715,7 +715,11 @@ void CodeGenAction::lowerHLFIRToFIR() {
pm.enableVerifier(/*verifyPasses=*/true);
// Create the pass pipeline
- fir::createHLFIRToFIRPassPipeline(pm, level);
+ fir::createHLFIRToFIRPassPipeline(
+ pm,
+ ci.getInvocation().getFrontendOpts().features.IsEnabled(
+ Fortran::common::LanguageFeature::OpenMP),
+ level);
(void)mlir::applyPassManagerCLOptions(pm);
if (!mlir::succeeded(pm.run(*mlirModule))) {
@@ -828,6 +832,10 @@ void CodeGenAction::generateLLVMIR() {
config.VScaleMax = vsr->second;
}
+ if (ci.getInvocation().getFrontendOpts().features.IsEnabled(
+ Fortran::common::LanguageFeature::OpenMP))
+ config.EnableOpenMP = true;
+
if (ci.getInvocation().getLoweringOpts().getNSWOnLoopVarInc())
config.NSWOnLoopVarInc = true;
diff --git a/flang/lib/Optimizer/OpenMP/CMakeLists.txt b/flang/lib/Optimizer/OpenMP/CMakeLists.txt
index 035d0d5ca46c76..b1e0dbf6e707e5 100644
--- a/flang/lib/Optimizer/OpenMP/CMakeLists.txt
+++ b/flang/lib/Optimizer/OpenMP/CMakeLists.txt
@@ -5,6 +5,7 @@ add_flang_library(FlangOpenMPTransforms
MapsForPrivatizedSymbols.cpp
MapInfoFinalization.cpp
MarkDeclareTarget.cpp
+ LowerWorkshare.cpp
DEPENDS
FIRDialect
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
new file mode 100644
index 00000000000000..225c585a02d913
--- /dev/null
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -0,0 +1,527 @@
+//===- LowerWorkshare.cpp - Lower the workshare construct ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the lowering of omp.workshare to other omp constructs.
+//
+// This pass is tasked with parallelizing the loops nested in
+// workshare.loop_wrapper while both the Fortran to mlir lowering and the hlfir
+// to fir lowering pipelines are responsible for emitting the
+// workshare.loop_wrapper ops where appropriate according to the
+// `shouldUseWorkshareLowering` function.
+//
+//===----------------------------------------------------------------------===//
+
+#include <flang/Optimizer/Builder/FIRBuilder.h>
+#include <flang/Optimizer/Dialect/FIROps.h>
+#include <flang/Optimizer/Dialect/FIRType.h>
+#include <flang/Optimizer/HLFIR/HLFIROps.h>
+#include <flang/Optimizer/OpenMP/Passes.h>
+#include <llvm/ADT/BreadthFirstIterator.h>
+#include <llvm/ADT/STLExtras.h>
+#include <llvm/ADT/SmallVectorExtras.h>
+#include <llvm/ADT/iterator_range.h>
+#include <llvm/Support/ErrorHandling.h>
+#include <mlir/Dialect/Arith/IR/Arith.h>
+#include <mlir/Dialect/LLVMIR/LLVMTypes.h>
+#include <mlir/Dialect/OpenMP/OpenMPClauseOperands.h>
+#include <mlir/Dialect/OpenMP/OpenMPDialect.h>
+#include <mlir/Dialect/SCF/IR/SCF.h>
+#include <mlir/IR/BuiltinOps.h>
+#include <mlir/IR/IRMapping.h>
+#include <mlir/IR/OpDefinition.h>
+#include <mlir/IR/PatternMatch.h>
+#include <mlir/IR/Value.h>
+#include <mlir/IR/Visitors.h>
+#include <mlir/Interfaces/SideEffectInterfaces.h>
+#include <mlir/Support/LLVM.h>
+
+#include <variant>
+
+namespace flangomp {
+#define GEN_PASS_DEF_LOWERWORKSHARE
+#include "flang/Optimizer/OpenMP/Passes.h.inc"
+} // namespace flangomp
+
+#define DEBUG_TYPE "lower-workshare"
+
+using namespace mlir;
+
+namespace flangomp {
+
+// Checks for the nesting pattern below, as we need to avoid sharing the work of
+// statements which are nested in some constructs such as omp.critical or
+// another omp.parallel.
+//
+// omp.workshare { // `wsOp`
+// ...
+// omp.T { // `parent`
+// ...
+// `op`
+//
+template <typename T>
+static bool isNestedIn(omp::WorkshareOp wsOp, Operation *op) {
+ T parent = op->getParentOfType<T>();
+ if (!parent)
+ return false;
+ return wsOp->isProperAncestor(parent);
+}
+
+bool shouldUseWorkshareLowering(Operation *op) {
+ auto parentWorkshare = op->getParentOfType<omp::WorkshareOp>();
+
+ if (!parentWorkshare)
+ return false;
+
+ if (isNestedIn<omp::CriticalOp>(parentWorkshare, op))
+ return false;
+
+ // 2.8.3 workshare Construct
+ // For a parallel construct, the construct is a unit of work with respect to
+ // the workshare construct. The statements contained in the parallel construct
+ // are executed by a new thread team.
+ if (isNestedIn<omp::ParallelOp>(parentWorkshare, op))
+ return false;
+
+ // 2.8.2 single Construct
+ // Binding The binding thread set for a single region is the current team. A
+ // single region binds to the innermost enclosing parallel region.
+ // Description Only one of the encountering threads will execute the
+ // structured block associated with the single construct.
+ if (isNestedIn<omp::SingleOp>(parentWorkshare, op))
+ return false;
+
+ // Do not use workshare lowering until we support CFG in omp.workshare
+ if (parentWorkshare.getRegion().getBlocks().size() != 1)
+ return false;
+
+ return true;
+}
+
+} // namespace flangomp
+
+namespace {
+
+struct SingleRegion {
+ Block::iterator begin, end;
+};
+
+static bool mustParallelizeOp(Operation *op) {
+ return op
+ ->walk([&](Operation *nested) {
+ // We need to be careful not to pick up workshare.loop_wrapper in nested
+ // omp.parallel{omp.workshare} regions, i.e. make sure that `nested`
+ // binds to the workshare region we are currently handling.
+ //
+ // For example:
+ //
+ // omp.parallel {
+ // omp.workshare { // currently handling this
+ // omp.parallel {
+ // omp.workshare { // nested workshare
+ // omp.workshare.loop_wrapper {}
+ //
+ // Therefore, we skip if we encounter a nested omp.workshare.
+ if (isa<omp::WorkshareOp>(nested))
+ return WalkResult::skip();
+ if (isa<omp::WorkshareLoopWrapperOp>(nested))
+ return WalkResult::interrupt();
+ return WalkResult::advance();
+ })
+ .wasInterrupted();
+}
+
+static bool isSafeToParallelize(Operation *op) {
+ return isa<hlfir::DeclareOp>(op) || isa<fir::DeclareOp>(op) ||
+ isMemoryEffectFree(op);
+}
+
+/// Simple shallow copies suffice for our purposes in this pass, so we implement
+/// this simpler alternative to the full-fledged `createCopyFunc` in the
+/// frontend.
+static mlir::func::FuncOp createCopyFunc(mlir::Location loc, mlir::Type varType,
+ fir::FirOpBuilder builder) {
+ mlir::ModuleOp module = builder.getModule();
+ auto rt = cast<fir::ReferenceType>(varType);
+ mlir::Type eleTy = rt.getEleTy();
+ std::string copyFuncName =
+ fir::getTypeAsString(eleTy, builder.getKindMap(), "_workshare_copy");
+
+ if (auto decl = module.lookupSymbol<mlir::func::FuncOp>(copyFuncName))
+ return decl;
+ // create function
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ mlir::OpBuilder modBuilder(module.getBodyRegion());
+ llvm::SmallVector<mlir::Type> argsTy = {varType, varType};
+ auto funcType = mlir::FunctionType::get(builder.getContext(), argsTy, {});
+ mlir::func::FuncOp funcOp =
+ modBuilder.create<mlir::func::FuncOp>(loc, copyFuncName, funcType);
+ funcOp.setVisibility(mlir::SymbolTable::Visibility::Private);
+ builder.createBlock(&funcOp.getRegion(), funcOp.getRegion().end(), argsTy,
+ {loc, loc});
+ builder.setInsertionPointToStart(&funcOp.getRegion().back());
+
+ Value loaded = builder.create<fir::LoadOp>(loc, funcOp.getArgument(1));
+ builder.create<fir::StoreOp>(loc, loaded, funcOp.getArgument(0));
+
+ builder.create<mlir::func::ReturnOp>(loc);
+ return funcOp;
+}
+
+static bool isUserOutsideSR(Operation *user, Operation *parentOp,
+ SingleRegion sr) {
+ while (user->getParentOp() != parentOp)
+ user = user->getParentOp();
+ return sr.begin->getBlock() != user->getBlock() ||
+ !(user->isBeforeInBlock(&*sr.end) && sr.begin->isBeforeInBlock(user));
+}
+
+static bool isTransitivelyUsedOutside(Value v, SingleRegion sr) {
+ Block *srBlock = sr.begin->getBlock();
+ Operation *parentOp = srBlock->getParentOp();
+
+ for (auto &use : v.getUses()) {
+ Operation *user = use.getOwner();
+ if (isUserOutsideSR(user, parentOp, sr))
+ return true;
+
+ // Now we know user is inside `sr`.
+
+ // Results of nested users cannot be used outside of `sr`.
+ if (user->getBlock() != srBlock)
+ continue;
+
+ // A non-safe to parallelize operation will be checked for uses outside
+ // separately.
+ if (!isSafeToParallelize(user))
+ continue;
+
+ // For safe to parallelize operations, we need to check if there is a
+ // transitive use of `v` through them.
+ for (auto res : user->getResults())
+ if (isTransitivelyUsedOutside(res, sr))
+ return true;
+ }
+ return false;
+}
+
+/// We clone pure operations in both the parallel and single blocks. This
+/// function cleans them up if they end up with no uses.
+static void cleanupBlock(Block *block) {
+ for (Operation &op : llvm::make_early_inc_range(
+ llvm::make_range(block->rbegin(), block->rend())))
+ if (isOpTriviallyDead(&op))
+ op.erase();
+}
+
+static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,
+ IRMapping &rootMapping, Location loc,
+ mlir::DominanceInfo &di) {
+ OpBuilder rootBuilder(sourceRegion.getContext());
+ ModuleOp m = sourceRegion.getParentOfType<ModuleOp>();
+ OpBuilder copyFuncBuilder(m.getBodyRegion());
+ fir::FirOpBuilder firCopyFuncBuilder(copyFuncBuilder, m);
+
+ auto mapReloadedValue =
+ [&](Value v, OpBuilder allocaBuilder, OpBuilder singleBuilder,
+ OpBuilder parallelBuilder, IRMapping singleMapping) -> Value {
+ if (auto reloaded = rootMapping.lookupOrNull(v))
+ return nullptr;
+ Type ty = v.getType();
+ Value alloc = allocaBuilder.create<fir::AllocaOp>(loc, ty);
+ singleBuilder.create<fir::StoreOp>(loc, singleMapping.lookup(v), alloc);
+ Value reloaded = parallelBuilder.create<fir::LoadOp>(loc, ty, alloc);
+ rootMapping.map(v, reloaded);
+ return alloc;
+ };
+
+ auto moveToSingle =
+ [&](SingleRegion sr, OpBuilder allocaBuilder, OpBuilder singleBuilder,
+ OpBuilder parallelBuilder) -> std::pair<bool, SmallVector<Value>> {
+ IRMapping singleMapping = rootMapping;
+ SmallVector<Value> copyPrivate;
+ bool allParallelized = true;
+
+ for (Operation &op : llvm::make_range(sr.begin, sr.end)) {
+ if (isSafeToParallelize(&op)) {
+ singleBuilder.clone(op, singleMapping);
+ if (llvm::all_of(op.getOperands(), [&](Value opr) {
+ // Either we have already remapped it
+ bool remapped = rootMapping.contains(opr);
+ // Or it is available because it dominates `sr`
+ bool dominates =
+ di.properlyDominates(opr.getDefiningOp(), &*sr.begin);
+ return remapped || dominates;
+ })) {
+ // Safe-to-parallelize operations which have all operands available in
+ // the root parallel block can be executed there.
+ parallelBuilder.clone(op, rootMapping);
+ } else {
+ // If any operand was not available, it means that there was no
+ // transitive use of a non-safe-to-parallelize operation outside `sr`.
+ // This means that there should be no transitive uses outside `sr` of
+ // `op`.
+ assert(llvm::all_of(op.getResults(), [&](Value v) {
+ return !isTransitivelyUsedOutside(v, sr);
+ }));
+ allParallelized = false;
+ }
+ } else if (auto alloca = dyn_cast<fir::AllocaOp>(&op)) {
+ auto hoisted =
+ cast<fir::AllocaOp>(allocaBuilder.clone(*alloca, singleMapping));
+ rootMapping.map(&*alloca, &*hoisted);
+ rootMapping.map(alloca.getResult(), hoisted.getResult());
+ copyPrivate.push_back(hoisted);
+ allParallelized = false;
+ } else {
+ singleBuilder.clone(op, singleMapping);
+ // Prepare reloaded values for results of operations that cannot be
+ // safely parallelized and which are used after the region `sr`.
+ for (auto res : op.getResults()) {
+ if (isTransitivelyUsedOutside(res, sr)) {
+ auto alloc = mapReloadedValue(res, allocaBuilder, singleBuilder,
+ parallelBuilder, singleMapping);
+ if (alloc)
+ copyPrivate.push_back(alloc);
+ }
+ }
+ allParallelized = false;
+ }
+ }
+ singleBuilder.create<omp::TerminatorOp>(loc);
+ return {allParallelized, copyPrivate};
+ };
+
+ for (Block &block : sourceRegion) {
+ Block *targetBlock = rootBuilder.createBlock(
+ &targetRegion, {}, block.getArgumentTypes(),
+ llvm::map_to_vector(block.getArguments(),
+ [](BlockArgument arg) { return arg.getLoc(); }));
+ rootMapping.map(&block, targetBlock);
+ rootMapping.map(block.getArguments(), targetBlock->getArguments());
+ }
+
+ auto handleOneBlock = [&](Block &block) {
+ Block &targetBlock = *rootMapping.lookup(&block);
+ rootBuilder.setInsertionPointToStart(&targetBlock);
+ Operation *terminator = block.getTerminator();
+ SmallVector<std::variant<SingleRegion, Operation *>> regions;
+
+ auto it = block.begin();
+ auto getOneRegion = [&]() {
+ if (&*it == terminator)
+ return false;
+ if (mustParallelizeOp(&*it)) {
+ regions.push_back(&*it);
+ it++;
+ return true;
+ }
+ SingleRegion sr;
+ sr.begin = it;
+ while (&*it != terminator && !mustParallelizeOp(&*it))
+ it++;
+ sr.end = it;
+ assert(sr.begin != sr.end);
+ regions.push_back(sr);
+ return true;
+ };
+ while (getOneRegion())
+ ;
+
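+ // For example, a block of the form (illustrative sketch)
+ //
+ //   op1; op2; omp.workshare.loop_wrapper { ... }; op3; terminator
+ //
+ // is decomposed into
+ //
+ //   [SingleRegion(op1..op2), omp.workshare.loop_wrapper, SingleRegion(op3)]
+ //
+ // assuming op1..op3 are ops that do not have to be parallelized.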
+ for (auto [i, opOrSingle] : llvm::enumerate(regions)) {
+ bool isLast = i + 1 == regions.size();
+ if (std::holds_alternative<SingleRegion>(opOrSingle)) {
+ OpBuilder singleBuilder(sourceRegion.getContext());
+ Block *singleBlock = new Block();
+ singleBuilder.setInsertionPointToStart(singleBlock);
+
+ OpBuilder allocaBuilder(sourceRegion.getContext());
+ Block *allocaBlock = new Block();
+ allocaBuilder.setInsertionPointToStart(allocaBlock);
+
+ OpBuilder parallelBuilder(sourceRegion.getContext());
+ Block *parallelBlock = new Block();
+ parallelBuilder.setInsertionPointToStart(parallelBlock);
+
+ auto [allParallelized, copyprivateVars] =
+ moveToSingle(std::get<SingleRegion>(opOrSingle), allocaBuilder,
+ singleBuilder, parallelBuilder);
+ if (allParallelized) {
+ // The single region was not required, as all operations were safe to
+ // parallelize.
+ assert(copyprivateVars.empty());
+ assert(allocaBlock->empty());
+ delete singleBlock;
+ } else {
+ omp::SingleOperands singleOperands;
+ if (isLast)
+ singleOperands.nowait = rootBuilder.getUnitAttr();
+ singleOperands.copyprivateVars = copyprivateVars;
+ cleanupBlock(singleBlock);
+ for (auto var : singleOperands.copyprivateVars) {
+ mlir::func::FuncOp funcOp =
+ createCopyFunc(loc, var.getType(), firCopyFuncBuilder);
+ singleOperands.copyprivateSyms.push_back(
+ SymbolRefAttr::get(funcOp));
+ }
+ omp::SingleOp singleOp =
+ rootBuilder.create<omp::SingleOp>(loc, singleOperands);
+ singleOp.getRegion().push_back(singleBlock);
+ targetRegion.front().getOperations().splice(
+ singleOp->getIterator(), allocaBlock->getOperations());
+ }
+ rootBuilder.getInsertionBlock()->getOperations().splice(
+ rootBuilder.getInsertionPoint(), parallelBlock->getOperations());
+ delete allocaBlock;
+ delete parallelBlock;
+ } else {
+ auto op = std::get<Operation *>(opOrSingle);
+ if (auto wslw = dyn_cast<omp::WorkshareLoopWrapperOp>(op)) {
+ omp::WsloopOperands wsloopOperands;
+ if (isLast)
+ wsloopOperands.nowait = rootBuilder.getUnitAttr();
+ auto wsloop =
+ rootBuilder.create<mlir::omp::WsloopOp>(loc, wsloopOperands);
+ auto clonedWslw = cast<omp::WorkshareLoopWrapperOp>(
+ rootBuilder.clone(*wslw, rootMapping));
+ wsloop.getRegion().takeBody(clonedWslw.getRegion());
+ clonedWslw->erase();
+ } else {
+ assert(mustParallelizeOp(op));
+ Operation *cloned = rootBuilder.cloneWithoutRegions(*op, rootMapping);
+ for (auto [region, clonedRegion] :
+ llvm::zip(op->getRegions(), cloned->getRegions()))
+ parallelizeRegion(region, clonedRegion, rootMapping, loc, di);
+ }
+ }
+ }
+
+ rootBuilder.clone(*block.getTerminator(), rootMapping);
+ };
+
+ if (sourceRegion.hasOneBlock()) {
+ handleOneBlock(sourceRegion.front());
+ } else {
+ auto &domTree = di.getDomTree(&sourceRegion);
+ for (auto node : llvm::breadth_first(domTree.getRootNode())) {
+ handleOneBlock(*node->getBlock());
+ }
+ }
+
+ for (Block &targetBlock : targetRegion)
+ cleanupBlock(&targetBlock);
+}
+
+/// Lowers workshare to a sequence of single-thread regions and parallel loops
+///
+/// For example:
+///
+/// omp.workshare {
+/// %a = fir.allocmem
+/// omp.workshare.loop_wrapper {}
+/// fir.call Assign %b %a
+/// fir.freemem %a
+/// }
+///
+/// becomes
+///
+/// %tmp = fir.alloca
+/// omp.single copyprivate(%tmp) {
+/// %a = fir.allocmem
+/// fir.store %a %tmp
+/// }
+/// %a_reloaded = fir.load %tmp
+/// omp.workshare.loop_wrapper {}
+/// omp.single {
+/// fir.call Assign %b %a_reloaded
+/// fir.freemem %a_reloaded
+/// }
+///
+/// Note that we allocate temporary memory for values computed in omp.single's
+/// that need to be accessed by all threads, and broadcast them using the
+/// single's copyprivate clause.
+LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) {
+ Location loc = wsOp->getLoc();
+ IRMapping rootMapping;
+
+ OpBuilder rootBuilder(wsOp);
+
+ // FIXME Currently, we only support workshare constructs with structured
+ // control flow. The transformation itself supports CFG; however, once we
+ // transform the MLIR region in the omp.workshare, we need to inline that
+ // region into the parent block. At this point of the pipeline we have no
+ // guarantee that the parent op supports CFG (e.g. fir.if), so this is not
+ // generally possible. The alternative is to put the lowered region in an
+ // operation akin to scf.execute_region, which would get lowered at the
+ // same time as fir ops get lowered to CFG. However, the SCF dialect is not
+ // registered in flang, so we cannot use it. Remove this requirement once
+ // scf.execute_region or an alternative operation becomes available.
+ if (wsOp.getRegion().getBlocks().size() == 1) {
+ // This operation is just a placeholder which will be erased later. We need
+ // it because our `parallelizeRegion` function works on regions and not
+ // blocks.
+ omp::WorkshareOp newOp =
+ rootBuilder.create<omp::WorkshareOp>(loc, omp::WorkshareOperands());
+ if (!wsOp.getNowait())
+ rootBuilder.create<omp::BarrierOp>(loc);
+
+ parallelizeRegion(wsOp.getRegion(), newOp.getRegion(), rootMapping, loc,
+ di);
+
+ // Inline the contents of the placeholder workshare op into its parent
+ // block.
+ Block *theBlock = &newOp.getRegion().front();
+ Operation *term = theBlock->getTerminator();
+ Block *parentBlock = wsOp->getBlock();
+ parentBlock->getOperations().splice(newOp->getIterator(),
+ theBlock->getOperations());
+ assert(term->getNumOperands() == 0);
+ term->erase();
+ newOp->erase();
+ wsOp->erase();
+ } else {
+ // Otherwise just change the operation to an omp.single.
+
+ wsOp->emitWarning(
+ "omp workshare with unstructured control flow is currently "
+ "unsupported and will be serialized.");
+
+ // `shouldUseWorkshareLowering` should have guaranteed that there are no
+ // omp.workshare.loop_wrapper ops that bind to this omp.workshare.
+ assert(!wsOp->walk([&](Operation *op) {
+ // Nested omp.workshare ops can have their own
+ // omp.workshare.loop_wrapper ops.
+ if (isa<omp::WorkshareOp>(op))
+ return WalkResult::skip();
+ if (isa<omp::WorkshareLoopWrapperOp>(op))
+ return WalkResult::interrupt();
+ return WalkResult::advance();
+ })
+ .wasInterrupted());
+
+ omp::SingleOperands operands;
+ operands.nowait = wsOp.getNowaitAttr();
+ omp::SingleOp newOp = rootBuilder.create<omp::SingleOp>(loc, operands);
+
+ newOp.getRegion().getBlocks().splice(newOp.getRegion().getBlocks().begin(),
+ wsOp.getRegion().getBlocks());
+ wsOp->erase();
+ }
+ return success();
+}
+
+class LowerWorksharePass
+ : public flangomp::impl::LowerWorkshareBase<LowerWorksharePass> {
+public:
+ void runOnOperation() override {
+ mlir::DominanceInfo &di = getAnalysis<mlir::DominanceInfo>();
+ getOperation()->walk([&](mlir::omp::WorkshareOp wsOp) {
+ if (failed(lowerWorkshare(wsOp, di)))
+ signalPassFailure();
+ });
+ }
+};
+} // namespace
diff --git a/flang/lib/Optimizer/Passes/Pipelines.cpp b/flang/lib/Optimizer/Passes/Pipelines.cpp
index a9144079915912..31af3531641dda 100644
--- a/flang/lib/Optimizer/Passes/Pipelines.cpp
+++ b/flang/lib/Optimizer/Passes/Pipelines.cpp
@@ -212,7 +212,7 @@ void createDefaultFIROptimizerPassPipeline(mlir::PassManager &pm,
/// \param pm - MLIR pass manager that will hold the pipeline definition
/// \param optLevel - optimization level used for creating FIR optimization
/// passes pipeline
-void createHLFIRToFIRPassPipeline(mlir::PassManager &pm,
+void createHLFIRToFIRPassPipeline(mlir::PassManager &pm, bool enableOpenMP,
llvm::OptimizationLevel optLevel) {
if (optLevel.isOptimizingForSpeed()) {
addCanonicalizerPassWithoutRegionSimplification(pm);
@@ -230,6 +230,8 @@ void createHLFIRToFIRPassPipeline(mlir::PassManager &pm,
pm.addPass(hlfir::createLowerHLFIRIntrinsics());
pm.addPass(hlfir::createBufferizeHLFIR());
pm.addPass(hlfir::createConvertHLFIRtoFIR());
+ if (enableOpenMP)
+ pm.addPass(flangomp::createLowerWorkshare());
}
/// Create a pass pipeline for handling certain OpenMP transformations needed
@@ -303,7 +305,7 @@ void createDefaultFIRCodeGenPassPipeline(mlir::PassManager &pm,
void createMLIRToLLVMPassPipeline(mlir::PassManager &pm,
MLIRToLLVMPassPipelineConfig &config,
llvm::StringRef inputFilename) {
- fir::createHLFIRToFIRPassPipeline(pm, config.OptLevel);
+ fir::createHLFIRToFIRPassPipeline(pm, config.EnableOpenMP, config.OptLevel);
// Add default optimizer pass pipeline.
fir::createDefaultFIROptimizerPassPipeline(pm, config);
diff --git a/flang/test/Fir/basic-program.fir b/flang/test/Fir/basic-program.fir
index bca454c13ff9cc..4b18acb7c2b430 100644
--- a/flang/test/Fir/basic-program.fir
+++ b/flang/test/Fir/basic-program.fir
@@ -47,6 +47,7 @@ func.func @_QQmain() {
// PASSES-NEXT: LowerHLFIRIntrinsics
// PASSES-NEXT: BufferizeHLFIR
// PASSES-NEXT: ConvertHLFIRtoFIR
+// PASSES-NEXT: LowerWorkshare
// PASSES-NEXT: CSE
// PASSES-NEXT: (S) 0 num-cse'd - Number of operations CSE'd
// PASSES-NEXT: (S) 0 num-dce'd - Number of operations DCE'd
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
new file mode 100644
index 00000000000000..12b0558d06ed58
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
@@ -0,0 +1,53 @@
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Checks that fir.alloca is hoisted out and copyprivate'd
+func.func @wsfunc() {
+ omp.workshare {
+ %c1 = arith.constant 1 : index
+ %c42 = arith.constant 42 : index
+ %c1_i32 = arith.constant 1 : i32
+ %alloc = fir.alloca i32
+ fir.store %c1_i32 to %alloc : !fir.ref<i32>
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ "test.test1"(%alloc) : (!fir.ref<i32>) -> ()
+ omp.yield
+ }
+ }
+ "test.test2"(%alloc) : (!fir.ref<i32>) -> ()
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: func.func private @_workshare_copy_i32(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32>,
+// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32>) {
+// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
+// CHECK: fir.store %[[VAL_2]] to %[[VAL_0]] : !fir.ref<i32>
+// CHECK: return
+// CHECK: }
+
+// CHECK-LABEL: func.func @wsfunc() {
+// CHECK: %[[VAL_0:.*]] = fir.alloca i32
+// CHECK: omp.single copyprivate(%[[VAL_0]] -> @_workshare_copy_i32 : !fir.ref<i32>) {
+// CHECK: %[[VAL_1:.*]] = arith.constant 1 : i32
+// CHECK: fir.store %[[VAL_1]] to %[[VAL_0]] : !fir.ref<i32>
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_2:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_3:.*]] = arith.constant 42 : index
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_4:.*]]) : index = (%[[VAL_2]]) to (%[[VAL_3]]) inclusive step (%[[VAL_2]]) {
+// CHECK: "test.test1"(%[[VAL_0]]) : (!fir.ref<i32>) -> ()
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: }
+// CHECK: omp.single nowait {
+// CHECK: "test.test2"(%[[VAL_0]]) : (!fir.ref<i32>) -> ()
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.barrier
+// CHECK: return
+// CHECK: }
+
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-binding.mlir b/flang/test/Transforms/OpenMP/lower-workshare-binding.mlir
new file mode 100644
index 00000000000000..f1d0e8e2296140
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-binding.mlir
@@ -0,0 +1,49 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Checks that the omp.workshare.loop_wrapper binds to the correct omp.workshare
+
+func.func @wsfunc() {
+ %c1 = arith.constant 1 : index
+ %c42 = arith.constant 42 : index
+ omp.parallel {
+ omp.workshare nowait {
+ omp.parallel {
+ omp.workshare nowait {
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ "test.test2"() : () -> ()
+ omp.yield
+ }
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: func.func @wsfunc() {
+// CHECK: %[[VAL_0:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_1:.*]] = arith.constant 42 : index
+// CHECK: omp.parallel {
+// CHECK: omp.single nowait {
+// CHECK: omp.parallel {
+// CHECK: omp.wsloop nowait {
+// CHECK: omp.loop_nest (%[[VAL_2:.*]]) : index = (%[[VAL_0]]) to (%[[VAL_1]]) inclusive step (%[[VAL_0]]) {
+// CHECK: "test.test2"() : () -> ()
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir b/flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir
new file mode 100644
index 00000000000000..ca288917a3ac49
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir
@@ -0,0 +1,57 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Check that we cleanup unused pure operations from the parallel and single
+// regions
+
+// CHECK-LABEL: func.func @wsfunc() {
+// CHECK: %[[VAL_0:.*]] = fir.alloca i32
+// CHECK: omp.parallel {
+// CHECK: omp.single {
+// CHECK: %[[VAL_1:.*]] = "test.test1"() : () -> i32
+// CHECK: %[[VAL_2:.*]] = arith.constant 2 : index
+// CHECK: %[[VAL_3:.*]] = arith.constant 3 : index
+// CHECK: %[[VAL_4:.*]] = arith.addi %[[VAL_2]], %[[VAL_3]] : index
+// CHECK: "test.test3"(%[[VAL_4]]) : (index) -> ()
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_6:.*]] = arith.constant 42 : index
+// CHECK: omp.wsloop nowait {
+// CHECK: omp.loop_nest (%[[VAL_7:.*]]) : index = (%[[VAL_5]]) to (%[[VAL_6]]) inclusive step (%[[VAL_5]]) {
+// CHECK: "test.test2"() : () -> ()
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: }
+// CHECK: omp.barrier
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+func.func @wsfunc() {
+ %a = fir.alloca i32
+ omp.parallel {
+ omp.workshare {
+ %t1 = "test.test1"() : () -> i32
+
+ %c1 = arith.constant 1 : index
+ %c42 = arith.constant 42 : index
+
+ %c2 = arith.constant 2 : index
+ %c3 = arith.constant 3 : index
+ %add = arith.addi %c2, %c3 : index
+ "test.test3"(%add) : (index) -> ()
+
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ "test.test2"() : () -> ()
+ omp.yield
+ }
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir b/flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir
new file mode 100644
index 00000000000000..d7a04e198ceed9
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir
@@ -0,0 +1,73 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+
+// Check if we store the correct values
+
+func.func @wsfunc() {
+ omp.parallel {
+ // CHECK: fir.alloca
+ // CHECK: fir.alloca
+ // CHECK: fir.alloca
+ // CHECK: fir.alloca
+ // CHECK: fir.alloca
+ // CHECK-NOT: fir.alloca
+ omp.workshare {
+
+ %t1 = "test.test1"() : () -> i32
+ // CHECK: %[[T1:.*]] = "test.test1"
+ // CHECK: fir.store %[[T1]]
+ %t2 = "test.test2"() : () -> i32
+ // CHECK: %[[T2:.*]] = "test.test2"
+ // CHECK: fir.store %[[T2]]
+ %t3 = "test.test3"() : () -> i32
+ // CHECK: %[[T3:.*]] = "test.test3"
+ // CHECK-NOT: fir.store %[[T3]]
+ %t4 = "test.test4"() : () -> i32
+ // CHECK: %[[T4:.*]] = "test.test4"
+ // CHECK: fir.store %[[T4]]
+ %t5 = "test.test5"() : () -> i32
+ // CHECK: %[[T5:.*]] = "test.test5"
+ // CHECK: fir.store %[[T5]]
+ %t6 = "test.test6"() : () -> i32
+ // CHECK: %[[T6:.*]] = "test.test6"
+ // CHECK-NOT: fir.store %[[T6]]
+
+
+ "test.test1"(%t1) : (i32) -> ()
+ "test.test1"(%t2) : (i32) -> ()
+ "test.test1"(%t3) : (i32) -> ()
+
+ %true = arith.constant true
+ fir.if %true {
+ "test.test2"(%t3) : (i32) -> ()
+ }
+
+ %c1_i32 = arith.constant 1 : i32
+
+ %t5_pure_use = arith.addi %t5, %c1_i32 : i32
+
+ %t6_mem_effect_use = "test.test8"(%t6) : (i32) -> i32
+ // CHECK: %[[T6_USE:.*]] = "test.test8"
+ // CHECK: fir.store %[[T6_USE]]
+
+ %c42 = arith.constant 42 : index
+ %c1 = arith.constant 1 : index
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ "test.test10"(%t1) : (i32) -> ()
+ "test.test10"(%t5_pure_use) : (i32) -> ()
+ "test.test10"(%t6_mem_effect_use) : (i32) -> ()
+ omp.yield
+ }
+ }
+
+ "test.test10"(%t2) : (i32) -> ()
+ fir.if %true {
+ "test.test10"(%t4) : (i32) -> ()
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir b/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir
new file mode 100644
index 00000000000000..31db8213b5f001
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir
@@ -0,0 +1,25 @@
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Check that the safe-to-parallelize `fir.declare` op is not parallelized
+// because its operand %alloc is not reloaded outside the omp.single.
+
+func.func @foo() {
+ %c0 = arith.constant 0 : index
+ omp.workshare {
+ %alloc = fir.allocmem !fir.array<?xf32>, %c0 {bindc_name = ".tmp.forall", uniq_name = ""}
+ %shape = fir.shape %c0 : (index) -> !fir.shape<1>
+ %declare = fir.declare %alloc(%shape) {uniq_name = ".tmp.forall"} : (!fir.heap<!fir.array<?xf32>>, !fir.shape<1>) -> !fir.heap<!fir.array<?xf32>>
+ fir.freemem %alloc : !fir.heap<!fir.array<?xf32>>
+ omp.terminator
+ }
+ return
+}
+
+// CHECK: omp.single nowait
+// CHECK: fir.allocmem
+// CHECK: fir.shape
+// CHECK: fir.declare
+// CHECK: fir.freemem
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.barrier
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir b/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir
new file mode 100644
index 00000000000000..1fd379a6e5eb48
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir
@@ -0,0 +1,19 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Check that we do not emit an omp.single for the constant operation
+
+func.func @foo() {
+ omp.workshare {
+ %c1 = arith.constant 1 : index
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c1) inclusive step (%c1) {
+ "test.test0"() : () -> ()
+ omp.yield
+ }
+ }
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-NOT: omp.single
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-nowait.mlir b/flang/test/Transforms/OpenMP/lower-workshare-nowait.mlir
new file mode 100644
index 00000000000000..940662e0bdccc2
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-nowait.mlir
@@ -0,0 +1,23 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Check that we correctly handle nowait
+
+// CHECK-LABEL: func.func @nonowait
+func.func @nonowait(%arg0: !fir.ref<!fir.array<42xi32>>) {
+ // CHECK: omp.barrier
+ omp.workshare {
+ omp.terminator
+ }
+ return
+}
+
+// -----
+
+// CHECK-LABEL: func.func @nowait
+func.func @nowait(%arg0: !fir.ref<!fir.array<42xi32>>) {
+ // CHECK-NOT: omp.barrier
+ omp.workshare nowait {
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
new file mode 100644
index 00000000000000..83c49cd635d082
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
@@ -0,0 +1,26 @@
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+
+// CHECK: warning: omp workshare with unstructured control flow is currently unsupported and will be serialized.
+
+// CHECK: omp.parallel
+// CHECK-NEXT: omp.single
+
+// TODO Check that the definition of %r dominates its use post-transform
+func.func @wsfunc() {
+ %a = fir.alloca i32
+ omp.parallel {
+ omp.workshare {
+ ^bb1:
+ %c1 = arith.constant 1 : i32
+ cf.br ^bb3(%c1: i32)
+ ^bb2:
+ "test.test2"(%r) : (i32) -> ()
+ omp.terminator
+ ^bb3(%arg1: i32):
+ %r = "test.test2"(%arg1) : (i32) -> i32
+ cf.br ^bb2
+ }
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
new file mode 100644
index 00000000000000..a27cf880694014
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
@@ -0,0 +1,23 @@
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+
+// CHECK: warning: omp workshare with unstructured control flow is currently unsupported and will be serialized.
+
+// CHECK: omp.parallel
+// CHECK-NEXT: omp.single
+
+// TODO Check transforming a simple CFG
+func.func @wsfunc() {
+ %a = fir.alloca i32
+ omp.parallel {
+ omp.workshare {
+ ^bb1:
+ %c1 = arith.constant 1 : i32
+ cf.br ^bb3(%c1: i32)
+ ^bb3(%arg1: i32):
+ "test.test2"(%arg1) : (i32) -> ()
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/tools/bbc/bbc.cpp b/flang/tools/bbc/bbc.cpp
index fe5e36f704c76c..1c24979bbcdafb 100644
--- a/flang/tools/bbc/bbc.cpp
+++ b/flang/tools/bbc/bbc.cpp
@@ -452,7 +452,8 @@ static llvm::LogicalResult convertFortranSourceToMLIR(
if (emitFIR && useHLFIR) {
// lower HLFIR to FIR
- fir::createHLFIRToFIRPassPipeline(pm, llvm::OptimizationLevel::O2);
+ fir::createHLFIRToFIRPassPipeline(pm, enableOpenMP,
+ llvm::OptimizationLevel::O2);
if (mlir::failed(pm.run(mlirModule))) {
llvm::errs() << "FATAL: lowering from HLFIR to FIR failed";
return mlir::failure();
@@ -467,6 +468,8 @@ static llvm::LogicalResult convertFortranSourceToMLIR(
// Add O2 optimizer pass pipeline.
MLIRToLLVMPassPipelineConfig config(llvm::OptimizationLevel::O2);
+ if (enableOpenMP)
+ config.EnableOpenMP = true;
config.NSWOnLoopVarInc = setNSW;
fir::registerDefaultInlinerPass(config);
fir::createDefaultFIROptimizerPassPipeline(pm, config);
diff --git a/flang/tools/tco/tco.cpp b/flang/tools/tco/tco.cpp
index 5c373c4e85258c..eaf4bae0884546 100644
--- a/flang/tools/tco/tco.cpp
+++ b/flang/tools/tco/tco.cpp
@@ -139,6 +139,7 @@ compileFIR(const mlir::PassPipelineCLParser &passPipeline) {
return mlir::failure();
} else {
MLIRToLLVMPassPipelineConfig config(llvm::OptimizationLevel::O2);
+ config.EnableOpenMP = true; // assume the input contains OpenMP
config.AliasAnalysis = true; // enabled when optimizing for speed
if (codeGenLLVM) {
// Run only CodeGen passes.